repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
models | models-master/research/object_detection/exporter_lib_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for exporter_lib_v2.py."""
from __future__ import division
import io
import os
import unittest
from absl.testing import parameterized
import numpy as np
from PIL import Image
import six
import tensorflow.compat.v2 as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.protos import pipeline_pb2
from object_detection.utils import dataset_util
from object_detection.utils import tf_version
if six.PY2:
import mock # pylint: disable=g-importing-member,g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top
class FakeModel(model.DetectionModel):
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=2)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs)
def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs):
return_dict = {'image': self._conv(preprocessed_inputs)}
if 'side_inp_1' in side_inputs:
return_dict['image'] += side_inputs['side_inp_1']
return return_dict
def postprocess(self, prediction_dict, true_image_shapes):
predict_tensor_sum = tf.reduce_sum(prediction_dict['image'])
with tf.control_dependencies(list(prediction_dict.values())):
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]], tf.float32),
'detection_scores': predict_tensor_sum + tf.constant(
[[0.7, 0.6], [0.9, 0.0]], tf.float32),
'detection_classes': tf.constant([[0, 1],
[1, 0]], tf.float32),
'num_detections': tf.constant([2, 1], tf.float32),
}
return postprocessed_tensors
def predict_masks_from_boxes(self, prediction_dict, true_image_shapes, boxes):
output_dict = self.postprocess(prediction_dict, true_image_shapes)
output_dict.update({
'detection_masks': tf.ones(shape=(1, 2, 16), dtype=tf.float32),
})
return output_dict
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
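# Added commentary (not part of the original test): `postprocess` above adds
# the sum of the 1x1 convolution output to every detection score, so an
# all-ones input of shape [1, h, w, 3] shifts the scores by
# conv_weight_scalar * h * w * 3 (e.g. 2 * 5 * 5 * 3 = 150 in the checkpoint
# test below). The score assertions in this file rely on that.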
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase):
def _save_checkpoint_from_mock_model(
self, checkpoint_dir, conv_weight_scalar=6.0):
mock_model = FakeModel(conv_weight_scalar)
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'encoded_image_string_tensor'},
{'input_type': 'tf_example'},
)
def test_export_yields_correct_directory_structure(
self, input_type='image_tensor'):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'saved_model.pb')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'variables', 'variables.index')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'variables',
'variables.data-00000-of-00001')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'checkpoint', 'ckpt-0.index')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'checkpoint', 'ckpt-0.data-00000-of-00001')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'pipeline.config')))
def get_dummy_input(self, input_type):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor':
return np.zeros((1, 20, 20, 3), dtype=np.uint8)
if input_type == 'float_image_tensor':
return np.zeros((1, 20, 20, 3), dtype=np.float32)
elif input_type == 'encoded_image_string_tensor':
image = Image.new('RGB', (20, 20))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
return [example]
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'encoded_image_string_tensor'},
{'input_type': 'tf_example'},
{'input_type': 'float_image_tensor'},
)
def test_export_saved_model_and_run_inference(
self, input_type='image_tensor'):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
image = self.get_dummy_input(input_type)
detections = detect_fn(tf.constant(image))
detection_fields = fields.DetectionResultFields
self.assertAllClose(detections[detection_fields.detection_boxes],
[[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(detections[detection_fields.detection_scores],
[[0.7, 0.6], [0.9, 0.0]])
self.assertAllClose(detections[detection_fields.detection_classes],
[[1, 2], [2, 1]])
self.assertAllClose(detections[detection_fields.num_detections], [2, 1])
@parameterized.parameters(
{'use_default_serving': True},
{'use_default_serving': False}
)
def test_export_saved_model_and_run_inference_with_side_inputs(
self, input_type='image_tensor', use_default_serving=True):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
use_side_inputs=True,
side_input_shapes='1/2,2',
side_input_names='side_inp_1,side_inp_2',
side_input_types='tf.float32,tf.uint8')
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
detect_fn_sig = detect_fn.signatures['serving_default']
image = tf.constant(self.get_dummy_input(input_type))
side_input_1 = np.ones((1,), dtype=np.float32)
side_input_2 = np.ones((2, 2), dtype=np.uint8)
if use_default_serving:
detections = detect_fn_sig(input_tensor=image,
side_inp_1=tf.constant(side_input_1),
side_inp_2=tf.constant(side_input_2))
else:
detections = detect_fn(image,
tf.constant(side_input_1),
tf.constant(side_input_2))
detection_fields = fields.DetectionResultFields
self.assertAllClose(detections[detection_fields.detection_boxes],
[[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(detections[detection_fields.detection_scores],
[[400.7, 400.6], [400.9, 400.0]])
self.assertAllClose(detections[detection_fields.detection_classes],
[[1, 2], [2, 1]])
self.assertAllClose(detections[detection_fields.num_detections], [2, 1])
def test_export_checkpoint_and_run_inference_with_image(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir, conv_weight_scalar=2.0)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
mock_model = FakeModel()
ckpt = tf.compat.v2.train.Checkpoint(
model=mock_model)
checkpoint_dir = os.path.join(tmp_dir, 'output', 'checkpoint')
manager = tf.compat.v2.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=7)
ckpt.restore(manager.latest_checkpoint).expect_partial()
fake_image = tf.ones(shape=[1, 5, 5, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
detections = mock_model.postprocess(predictions, true_image_shapes)
# 150 = conv_weight_scalar * height * width * channels = 2 * 5 * 5 * 3.
self.assertAllClose(detections['detection_scores'],
[[150 + 0.7, 150 + 0.6], [150 + 0.9, 150 + 0.0]])
class DetectionFromImageAndBoxModuleTest(tf.test.TestCase):
def get_dummy_input(self, input_type):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor' or input_type == 'image_and_boxes_tensor':
return np.zeros((1, 20, 20, 3), dtype=np.uint8)
if input_type == 'float_image_tensor':
return np.zeros((1, 20, 20, 3), dtype=np.float32)
elif input_type == 'encoded_image_string_tensor':
image = Image.new('RGB', (20, 20))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
return [example]
def _save_checkpoint_from_mock_model(self,
checkpoint_dir,
conv_weight_scalar=6.0):
mock_model = FakeModel(conv_weight_scalar)
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def test_export_saved_model_and_run_inference_for_segmentation(
self, input_type='image_and_boxes_tensor'):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
image = self.get_dummy_input(input_type)
boxes = tf.constant([
[
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8],
],
])
detections = detect_fn(tf.constant(image), boxes)
detection_fields = fields.DetectionResultFields
self.assertIn(detection_fields.detection_masks, detections)
self.assertListEqual(
list(detections[detection_fields.detection_masks].shape), [1, 2, 16])
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 16,191 | 41.610526 | 85 | py |
models | models-master/research/object_detection/model_lib.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import os
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
import tensorflow.compat.v2 as tf2
import tf_slim as slim
from object_detection import eval_util
from object_detection import exporter as exporter_lib
from object_detection import inputs
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
from object_detection.utils import visualization_utils as vis_utils
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import learn as contrib_learn
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
# A map of names to methods that help build the model.
MODEL_BUILD_UTIL_MAP = {
'get_configs_from_pipeline_file':
config_util.get_configs_from_pipeline_file,
'create_pipeline_proto_from_configs':
config_util.create_pipeline_proto_from_configs,
'merge_external_params_with_configs':
config_util.merge_external_params_with_configs,
'create_train_input_fn':
inputs.create_train_input_fn,
'create_eval_input_fn':
inputs.create_eval_input_fn,
'create_predict_input_fn':
inputs.create_predict_input_fn,
'detection_model_fn_base':
model_builder.build,
}
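# Added commentary (not part of the original module): this map adds a level
# of indirection so callers and tests can swap individual builders, e.g.
# MODEL_BUILD_UTIL_MAP['detection_model_fn_base'] = some_fake_builder,
# without monkey-patching the imported modules directly.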
def _prepare_groundtruth_for_eval(detection_model, class_agnostic,
max_number_of_boxes):
"""Extracts groundtruth data from detection_model and prepares it for eval.
Args:
detection_model: A `DetectionModel` object.
class_agnostic: Whether the detections are class_agnostic.
max_number_of_boxes: Max number of groundtruth boxes.
Returns:
    groundtruth: Dictionary with the following fields:
'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes,
in normalized coordinates.
'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
classes.
'groundtruth_masks': 4D float32 tensor of instance masks (if provided in
groundtruth)
'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating
is_crowd annotations (if provided in groundtruth).
'groundtruth_area': [batch_size, num_boxes] float32 tensor indicating
the area (in the original absolute coordinates) of annotations (if
provided in groundtruth).
'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
        of groundtruth boxes per image.
'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32
tensor of keypoints (if provided in groundtruth).
'groundtruth_dp_num_points_list': [batch_size, num_boxes] int32 tensor
with the number of DensePose points for each instance (if provided in
groundtruth).
'groundtruth_dp_part_ids_list': [batch_size, num_boxes,
max_sampled_points] int32 tensor with the part ids for each DensePose
sampled point (if provided in groundtruth).
'groundtruth_dp_surface_coords_list': [batch_size, num_boxes,
max_sampled_points, 4] containing the DensePose surface coordinates for
each sampled point (if provided in groundtruth).
'groundtruth_track_ids_list': [batch_size, num_boxes] int32 tensor
with track ID for each instance (if provided in groundtruth).
'groundtruth_group_of': [batch_size, num_boxes] bool tensor indicating
group_of annotations (if provided in groundtruth).
'groundtruth_labeled_classes': [batch_size, num_classes] int64
tensor of 1-indexed classes.
'groundtruth_verified_neg_classes': [batch_size, num_classes] float32
K-hot representation of 1-indexed classes which were verified as not
present in the image.
'groundtruth_not_exhaustive_classes': [batch_size, num_classes] K-hot
representation of 1-indexed classes which don't have all of their
instances marked exhaustively.
'input_data_fields.groundtruth_image_classes': integer representation of
the classes that were sent for verification for a given image. Note that
this field does not support batching as the number of classes can be
variable.
"""
input_data_fields = fields.InputDataFields()
groundtruth_boxes = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.boxes))
groundtruth_boxes_shape = tf.shape(groundtruth_boxes)
# For class-agnostic models, groundtruth one-hot encodings collapse to all
# ones.
if class_agnostic:
groundtruth_classes_one_hot = tf.ones(
[groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1])
else:
groundtruth_classes_one_hot = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.classes))
label_id_offset = 1 # Applying label id offset (b/63711816)
groundtruth_classes = (
tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset)
groundtruth = {
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes
}
if detection_model.groundtruth_has_field(
input_data_fields.groundtruth_image_classes):
groundtruth_image_classes_k_hot = tf.stack(
detection_model.groundtruth_lists(
input_data_fields.groundtruth_image_classes))
groundtruth_image_classes = tf.expand_dims(
tf.where(groundtruth_image_classes_k_hot > 0)[:, 1], 0)
# Adds back label_id_offset as it is subtracted in
# convert_labeled_classes_to_k_hot.
groundtruth[
input_data_fields.
groundtruth_image_classes] = groundtruth_image_classes + label_id_offset
if detection_model.groundtruth_has_field(fields.BoxListFields.masks):
groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.masks))
if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd):
groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.is_crowd))
if detection_model.groundtruth_has_field(input_data_fields.groundtruth_area):
groundtruth[input_data_fields.groundtruth_area] = tf.stack(
detection_model.groundtruth_lists(input_data_fields.groundtruth_area))
if detection_model.groundtruth_has_field(fields.BoxListFields.keypoints):
groundtruth[input_data_fields.groundtruth_keypoints] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.keypoints))
if detection_model.groundtruth_has_field(
fields.BoxListFields.keypoint_depths):
groundtruth[input_data_fields.groundtruth_keypoint_depths] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.keypoint_depths))
groundtruth[
input_data_fields.groundtruth_keypoint_depth_weights] = tf.stack(
detection_model.groundtruth_lists(
fields.BoxListFields.keypoint_depth_weights))
if detection_model.groundtruth_has_field(
fields.BoxListFields.keypoint_visibilities):
groundtruth[input_data_fields.groundtruth_keypoint_visibilities] = tf.stack(
detection_model.groundtruth_lists(
fields.BoxListFields.keypoint_visibilities))
if detection_model.groundtruth_has_field(fields.BoxListFields.group_of):
groundtruth[input_data_fields.groundtruth_group_of] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.group_of))
label_id_offset_paddings = tf.constant([[0, 0], [1, 0]])
if detection_model.groundtruth_has_field(
input_data_fields.groundtruth_verified_neg_classes):
groundtruth[input_data_fields.groundtruth_verified_neg_classes] = tf.pad(
tf.stack(
detection_model.groundtruth_lists(
input_data_fields.groundtruth_verified_neg_classes)),
label_id_offset_paddings)
if detection_model.groundtruth_has_field(
input_data_fields.groundtruth_not_exhaustive_classes):
groundtruth[input_data_fields.groundtruth_not_exhaustive_classes] = tf.pad(
tf.stack(
detection_model.groundtruth_lists(
input_data_fields.groundtruth_not_exhaustive_classes)),
label_id_offset_paddings)
if detection_model.groundtruth_has_field(
fields.BoxListFields.densepose_num_points):
groundtruth[input_data_fields.groundtruth_dp_num_points] = tf.stack(
detection_model.groundtruth_lists(
fields.BoxListFields.densepose_num_points))
if detection_model.groundtruth_has_field(
fields.BoxListFields.densepose_part_ids):
groundtruth[input_data_fields.groundtruth_dp_part_ids] = tf.stack(
detection_model.groundtruth_lists(
fields.BoxListFields.densepose_part_ids))
if detection_model.groundtruth_has_field(
fields.BoxListFields.densepose_surface_coords):
groundtruth[input_data_fields.groundtruth_dp_surface_coords] = tf.stack(
detection_model.groundtruth_lists(
fields.BoxListFields.densepose_surface_coords))
if detection_model.groundtruth_has_field(fields.BoxListFields.track_ids):
groundtruth[input_data_fields.groundtruth_track_ids] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.track_ids))
if detection_model.groundtruth_has_field(
input_data_fields.groundtruth_labeled_classes):
groundtruth[input_data_fields.groundtruth_labeled_classes] = tf.pad(
tf.stack(
detection_model.groundtruth_lists(
input_data_fields.groundtruth_labeled_classes)),
label_id_offset_paddings)
groundtruth[input_data_fields.num_groundtruth_boxes] = (
tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]]))
return groundtruth
def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):
"""Unstacks all tensors in `tensor_dict` along 0th dimension.
Unstacks tensor from the tensor dict along 0th dimension and returns a
tensor_dict containing values that are lists of unstacked, unpadded tensors.
Tensors in the `tensor_dict` are expected to be of one of the three shapes:
1. [batch_size]
2. [batch_size, height, width, channels]
3. [batch_size, num_boxes, d1, d2, ... dn]
When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3
above are sliced along the `num_boxes` dimension using the value in tensor
field.InputDataFields.num_groundtruth_boxes.
Note that this function has a static list of input data fields and has to be
kept in sync with the InputDataFields defined in core/standard_fields.py
Args:
tensor_dict: A dictionary of batched groundtruth tensors.
unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`
dimension of the groundtruth tensors.
Returns:
A dictionary where the keys are from fields.InputDataFields and values are
a list of unstacked (optionally unpadded) tensors.
Raises:
ValueError: If unpad_tensors is True and `tensor_dict` does not contain
`num_groundtruth_boxes` tensor.
"""
unbatched_tensor_dict = {
key: tf.unstack(tensor) for key, tensor in tensor_dict.items()
}
if unpad_groundtruth_tensors:
if (fields.InputDataFields.num_groundtruth_boxes
not in unbatched_tensor_dict):
raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '
'Keys available: {}'.format(
unbatched_tensor_dict.keys()))
unbatched_unpadded_tensor_dict = {}
unpad_keys = set([
# List of input data fields that are padded along the num_boxes
# dimension. This list has to be kept in sync with InputDataFields in
# standard_fields.py.
fields.InputDataFields.groundtruth_instance_masks,
fields.InputDataFields.groundtruth_instance_mask_weights,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_keypoint_depths,
fields.InputDataFields.groundtruth_keypoint_depth_weights,
fields.InputDataFields.groundtruth_keypoint_visibilities,
fields.InputDataFields.groundtruth_dp_num_points,
fields.InputDataFields.groundtruth_dp_part_ids,
fields.InputDataFields.groundtruth_dp_surface_coords,
fields.InputDataFields.groundtruth_track_ids,
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_weights
]).intersection(set(unbatched_tensor_dict.keys()))
for key in unpad_keys:
unpadded_tensor_list = []
for num_gt, padded_tensor in zip(
unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
unbatched_tensor_dict[key]):
tensor_shape = shape_utils.combined_static_and_dynamic_shape(
padded_tensor)
slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)
slice_size = tf.stack(
[num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])
unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)
unpadded_tensor_list.append(unpadded_tensor)
unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list
unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)
return unbatched_tensor_dict
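# Usage sketch (added illustration; all values below are made up): how
# `unstack_batch` turns a batched, padded groundtruth dictionary into
# per-image lists with the padding sliced off.
def _example_unstack_batch():
  """Illustrative only; not used anywhere in the library."""
  tensor_dict = {
      fields.InputDataFields.num_groundtruth_boxes:
          tf.constant([1, 2]),
      fields.InputDataFields.groundtruth_boxes:
          tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
                       [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]]]),
      fields.InputDataFields.groundtruth_classes:
          tf.constant([[[1.0, 0.0], [0.0, 0.0]],
                       [[0.0, 1.0], [1.0, 0.0]]]),
  }
  unbatched = unstack_batch(tensor_dict, unpad_groundtruth_tensors=True)
  # The boxes entry is now a list of two tensors with shapes [1, 4] and
  # [2, 4]: the padded second box of the first image has been removed.
  return unbatched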
def provide_groundtruth(model, labels, training_step=None):
"""Provides the labels to a model as groundtruth.
This helper function extracts the corresponding boxes, classes,
keypoints, weights, masks, etc. from the labels, and provides it
as groundtruth to the models.
Args:
model: The detection model to provide groundtruth to.
labels: The labels for the training or evaluation inputs.
training_step: int, optional. The training step for the model. Useful for
models which want to anneal loss weights.
"""
gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]
gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]
gt_masks_list = None
if fields.InputDataFields.groundtruth_instance_masks in labels:
gt_masks_list = labels[fields.InputDataFields.groundtruth_instance_masks]
gt_mask_weights_list = None
if fields.InputDataFields.groundtruth_instance_mask_weights in labels:
gt_mask_weights_list = labels[
fields.InputDataFields.groundtruth_instance_mask_weights]
gt_keypoints_list = None
if fields.InputDataFields.groundtruth_keypoints in labels:
gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]
gt_keypoint_depths_list = None
gt_keypoint_depth_weights_list = None
if fields.InputDataFields.groundtruth_keypoint_depths in labels:
gt_keypoint_depths_list = (
labels[fields.InputDataFields.groundtruth_keypoint_depths])
gt_keypoint_depth_weights_list = (
labels[fields.InputDataFields.groundtruth_keypoint_depth_weights])
gt_keypoint_visibilities_list = None
if fields.InputDataFields.groundtruth_keypoint_visibilities in labels:
gt_keypoint_visibilities_list = labels[
fields.InputDataFields.groundtruth_keypoint_visibilities]
gt_dp_num_points_list = None
if fields.InputDataFields.groundtruth_dp_num_points in labels:
gt_dp_num_points_list = labels[
fields.InputDataFields.groundtruth_dp_num_points]
gt_dp_part_ids_list = None
if fields.InputDataFields.groundtruth_dp_part_ids in labels:
gt_dp_part_ids_list = labels[fields.InputDataFields.groundtruth_dp_part_ids]
gt_dp_surface_coords_list = None
if fields.InputDataFields.groundtruth_dp_surface_coords in labels:
gt_dp_surface_coords_list = labels[
fields.InputDataFields.groundtruth_dp_surface_coords]
gt_track_ids_list = None
if fields.InputDataFields.groundtruth_track_ids in labels:
gt_track_ids_list = labels[fields.InputDataFields.groundtruth_track_ids]
gt_weights_list = None
if fields.InputDataFields.groundtruth_weights in labels:
gt_weights_list = labels[fields.InputDataFields.groundtruth_weights]
gt_confidences_list = None
if fields.InputDataFields.groundtruth_confidences in labels:
gt_confidences_list = labels[fields.InputDataFields.groundtruth_confidences]
gt_is_crowd_list = None
if fields.InputDataFields.groundtruth_is_crowd in labels:
gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd]
gt_group_of_list = None
if fields.InputDataFields.groundtruth_group_of in labels:
gt_group_of_list = labels[fields.InputDataFields.groundtruth_group_of]
gt_area_list = None
if fields.InputDataFields.groundtruth_area in labels:
gt_area_list = labels[fields.InputDataFields.groundtruth_area]
gt_labeled_classes = None
if fields.InputDataFields.groundtruth_labeled_classes in labels:
gt_labeled_classes = labels[
fields.InputDataFields.groundtruth_labeled_classes]
gt_verified_neg_classes = None
if fields.InputDataFields.groundtruth_verified_neg_classes in labels:
gt_verified_neg_classes = labels[
fields.InputDataFields.groundtruth_verified_neg_classes]
gt_not_exhaustive_classes = None
if fields.InputDataFields.groundtruth_not_exhaustive_classes in labels:
gt_not_exhaustive_classes = labels[
fields.InputDataFields.groundtruth_not_exhaustive_classes]
groundtruth_image_classes = None
if fields.InputDataFields.groundtruth_image_classes in labels:
groundtruth_image_classes = labels[
fields.InputDataFields.groundtruth_image_classes]
model.provide_groundtruth(
groundtruth_boxes_list=gt_boxes_list,
groundtruth_classes_list=gt_classes_list,
groundtruth_confidences_list=gt_confidences_list,
groundtruth_labeled_classes=gt_labeled_classes,
groundtruth_masks_list=gt_masks_list,
groundtruth_mask_weights_list=gt_mask_weights_list,
groundtruth_keypoints_list=gt_keypoints_list,
groundtruth_keypoint_visibilities_list=gt_keypoint_visibilities_list,
groundtruth_dp_num_points_list=gt_dp_num_points_list,
groundtruth_dp_part_ids_list=gt_dp_part_ids_list,
groundtruth_dp_surface_coords_list=gt_dp_surface_coords_list,
groundtruth_weights_list=gt_weights_list,
groundtruth_is_crowd_list=gt_is_crowd_list,
groundtruth_group_of_list=gt_group_of_list,
groundtruth_area_list=gt_area_list,
groundtruth_track_ids_list=gt_track_ids_list,
groundtruth_verified_neg_classes=gt_verified_neg_classes,
groundtruth_not_exhaustive_classes=gt_not_exhaustive_classes,
groundtruth_keypoint_depths_list=gt_keypoint_depths_list,
groundtruth_keypoint_depth_weights_list=gt_keypoint_depth_weights_list,
groundtruth_image_classes=groundtruth_image_classes,
training_step=training_step)
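# Usage sketch (added illustration): a minimal `labels` dictionary, in the
# per-image list form produced by `unstack_batch`, only needs boxes and
# one-hot classes; every other field is forwarded only when present.
def _example_provide_groundtruth(detection_model):
  """Illustrative only; not used anywhere in the library."""
  labels = {
      fields.InputDataFields.groundtruth_boxes: [
          tf.constant([[0.1, 0.1, 0.6, 0.6]])
      ],
      fields.InputDataFields.groundtruth_classes: [
          tf.constant([[0.0, 1.0]])
      ],
  }
  provide_groundtruth(detection_model, labels)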
def create_model_fn(detection_model_fn,
configs,
hparams=None,
use_tpu=False,
postprocess_on_cpu=False):
"""Creates a model function for `Estimator`.
Args:
detection_model_fn: Function that returns a `DetectionModel` instance.
configs: Dictionary of pipeline config objects.
hparams: `HParams` object.
use_tpu: Boolean indicating whether model should be constructed for use on
TPU.
postprocess_on_cpu: When use_tpu and postprocess_on_cpu is true, postprocess
is scheduled on the host cpu.
Returns:
`model_fn` for `Estimator`.
"""
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
def model_fn(features, labels, mode, params=None):
"""Constructs the object detection model.
Args:
features: Dictionary of feature tensors, returned from `input_fn`.
labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,
otherwise None.
mode: Mode key from tf.estimator.ModeKeys.
params: Parameter dictionary passed from the estimator.
Returns:
An `EstimatorSpec` that encapsulates the model and its serving
configurations.
"""
params = params or {}
total_loss, train_op, detections, export_outputs = None, None, None, None
is_training = mode == tf_estimator.ModeKeys.TRAIN
# Make sure to set the Keras learning phase. True during training,
# False for inference.
tf.keras.backend.set_learning_phase(is_training)
# Set policy for mixed-precision training with Keras-based models.
if use_tpu and train_config.use_bfloat16:
# Enable v2 behavior, as `mixed_bfloat16` is only supported in TF 2.0.
tf.keras.layers.enable_v2_dtype_behavior()
tf2.keras.mixed_precision.set_global_policy('mixed_bfloat16')
detection_model = detection_model_fn(
is_training=is_training, add_summaries=(not use_tpu))
scaffold_fn = None
if mode == tf_estimator.ModeKeys.TRAIN:
labels = unstack_batch(
labels,
unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)
elif mode == tf_estimator.ModeKeys.EVAL:
      # For evaluating on train data, it is necessary to check whether
      # groundtruth must be unpadded.
boxes_shape = (
labels[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list())
unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu
labels = unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
if mode in (tf_estimator.ModeKeys.TRAIN, tf_estimator.ModeKeys.EVAL):
provide_groundtruth(detection_model, labels)
preprocessed_images = features[fields.InputDataFields.image]
side_inputs = detection_model.get_side_inputs(features)
if use_tpu and train_config.use_bfloat16:
with tf.tpu.bfloat16_scope():
prediction_dict = detection_model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape], **side_inputs)
prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict)
else:
prediction_dict = detection_model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape], **side_inputs)
def postprocess_wrapper(args):
return detection_model.postprocess(args[0], args[1])
if mode in (tf_estimator.ModeKeys.EVAL, tf_estimator.ModeKeys.PREDICT):
if use_tpu and postprocess_on_cpu:
detections = tf.tpu.outside_compilation(
postprocess_wrapper,
(prediction_dict,
features[fields.InputDataFields.true_image_shape]))
else:
detections = postprocess_wrapper(
(prediction_dict,
features[fields.InputDataFields.true_image_shape]))
if mode == tf_estimator.ModeKeys.TRAIN:
load_pretrained = hparams.load_pretrained if hparams else False
if train_config.fine_tune_checkpoint and load_pretrained:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, set train_config.fine_tune_checkpoint_type
# based on train_config.from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
asg_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (
variables_helper.get_variables_available_in_checkpoint(
asg_map,
train_config.fine_tune_checkpoint,
include_global_step=False))
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
if mode in (tf_estimator.ModeKeys.TRAIN, tf_estimator.ModeKeys.EVAL):
if (mode == tf_estimator.ModeKeys.EVAL and
eval_config.use_dummy_loss_in_eval):
total_loss = tf.constant(1.0)
losses_dict = {'Loss/total_loss': total_loss}
else:
losses_dict = detection_model.loss(
prediction_dict, features[fields.InputDataFields.true_image_shape])
losses = [loss_tensor for loss_tensor in losses_dict.values()]
if train_config.add_regularization_loss:
regularization_losses = detection_model.regularization_losses()
if use_tpu and train_config.use_bfloat16:
regularization_losses = ops.bfloat16_to_float32_nested(
regularization_losses)
if regularization_losses:
regularization_loss = tf.add_n(
regularization_losses, name='regularization_loss')
losses.append(regularization_loss)
losses_dict['Loss/regularization_loss'] = regularization_loss
total_loss = tf.add_n(losses, name='total_loss')
losses_dict['Loss/total_loss'] = total_loss
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=is_training)
graph_rewriter_fn()
# TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we
# can write learning rate summaries on TPU without host calls.
global_step = tf.train.get_or_create_global_step()
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
if mode == tf_estimator.ModeKeys.TRAIN:
if use_tpu:
training_optimizer = tf.tpu.CrossShardOptimizer(training_optimizer)
# Optionally freeze some layers by setting their gradients to be zero.
trainable_variables = None
include_variables = (
train_config.update_trainable_variables
if train_config.update_trainable_variables else None)
exclude_variables = (
train_config.freeze_variables
if train_config.freeze_variables else None)
trainable_variables = slim.filter_variables(
tf.trainable_variables(),
include_patterns=include_variables,
exclude_patterns=exclude_variables)
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
if not use_tpu:
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var)
summaries = [] if use_tpu else None
if train_config.summarize_gradients:
summaries = ['gradients', 'gradient_norm', 'global_gradient_norm']
train_op = slim.optimizers.optimize_loss(
loss=total_loss,
global_step=global_step,
learning_rate=None,
clip_gradients=clip_gradients_value,
optimizer=training_optimizer,
update_ops=detection_model.updates(),
variables=trainable_variables,
summaries=summaries,
name='') # Preventing scope prefix on all variables.
if mode == tf_estimator.ModeKeys.PREDICT:
exported_output = exporter_lib.add_output_tensor_nodes(detections)
export_outputs = {
tf.saved_model.signature_constants.PREDICT_METHOD_NAME:
tf_estimator.export.PredictOutput(exported_output)
}
eval_metric_ops = None
scaffold = None
if mode == tf_estimator.ModeKeys.EVAL:
class_agnostic = (
fields.DetectionResultFields.detection_classes not in detections)
groundtruth = _prepare_groundtruth_for_eval(
detection_model, class_agnostic,
eval_input_config.max_number_of_boxes)
use_original_images = fields.InputDataFields.original_image in features
if use_original_images:
eval_images = features[fields.InputDataFields.original_image]
true_image_shapes = tf.slice(
features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])
original_image_spatial_shapes = features[
fields.InputDataFields.original_image_spatial_shape]
else:
eval_images = features[fields.InputDataFields.image]
true_image_shapes = None
original_image_spatial_shapes = None
eval_dict = eval_util.result_dict_for_batched_example(
eval_images,
features[inputs.HASH_KEY],
detections,
groundtruth,
class_agnostic=class_agnostic,
scale_to_absolute=True,
original_image_spatial_shapes=original_image_spatial_shapes,
true_image_shapes=true_image_shapes)
if fields.InputDataFields.image_additional_channels in features:
eval_dict[fields.InputDataFields.image_additional_channels] = features[
fields.InputDataFields.image_additional_channels]
if class_agnostic:
category_index = label_map_util.create_class_agnostic_category_index()
else:
category_index = label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path)
vis_metric_ops = None
if not use_tpu and use_original_images:
keypoint_edges = [(kp.start, kp.end) for kp in eval_config.keypoint_edge
]
eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections(
category_index,
max_examples_to_draw=eval_config.num_visualizations,
max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,
min_score_thresh=eval_config.min_score_threshold,
use_normalized_coordinates=False,
keypoint_edges=keypoint_edges or None)
vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops(
eval_dict)
# Eval metrics on a single example.
eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_config, list(category_index.values()), eval_dict)
for loss_key, loss_tensor in iter(losses_dict.items()):
eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)
for var in optimizer_summary_vars:
eval_metric_ops[var.op.name] = (var, tf.no_op())
if vis_metric_ops is not None:
eval_metric_ops.update(vis_metric_ops)
eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}
if eval_config.use_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.0)
variables_to_restore = variable_averages.variables_to_restore()
keep_checkpoint_every_n_hours = (
train_config.keep_checkpoint_every_n_hours)
saver = tf.train.Saver(
variables_to_restore,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
scaffold = tf.train.Scaffold(saver=saver)
# EVAL executes on CPU, so use regular non-TPU EstimatorSpec.
if use_tpu and mode != tf_estimator.ModeKeys.EVAL:
return tf_estimator.tpu.TPUEstimatorSpec(
mode=mode,
scaffold_fn=scaffold_fn,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metrics=eval_metric_ops,
export_outputs=export_outputs)
else:
if scaffold is None:
keep_checkpoint_every_n_hours = (
train_config.keep_checkpoint_every_n_hours)
saver = tf.train.Saver(
sharded=True,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
scaffold = tf.train.Scaffold(saver=saver)
return tf_estimator.EstimatorSpec(
mode=mode,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs,
scaffold=scaffold)
return model_fn
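# Usage sketch (added illustration): wiring the returned `model_fn` into a
# plain Estimator. In practice `create_estimator_and_inputs` below does this
# for you.
def _example_build_estimator(configs, run_config):
  """Illustrative only; not used anywhere in the library."""
  detection_model_fn = functools.partial(
      model_builder.build, model_config=configs['model'])
  model_fn = create_model_fn(detection_model_fn, configs)
  return tf_estimator.Estimator(model_fn=model_fn, config=run_config)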
def create_estimator_and_inputs(run_config,
hparams=None,
pipeline_config_path=None,
config_override=None,
train_steps=None,
sample_1_of_n_eval_examples=1,
sample_1_of_n_eval_on_train_examples=1,
model_fn_creator=create_model_fn,
use_tpu_estimator=False,
use_tpu=False,
num_shards=1,
params=None,
override_eval_num_epochs=True,
save_final_config=False,
postprocess_on_cpu=False,
export_to_tpu=None,
**kwargs):
"""Creates `Estimator`, input functions, and steps.
Args:
run_config: A `RunConfig`.
hparams: (optional) A `HParams`.
pipeline_config_path: A path to a pipeline config file.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
sample_1_of_n_eval_examples: Integer representing how often an eval example
should be sampled. If 1, will sample all examples.
sample_1_of_n_eval_on_train_examples: Similar to
`sample_1_of_n_eval_examples`, except controls the sampling of training
data for evaluation.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns: `model_fn` for `Estimator`.
use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False, an
`Estimator` will be returned.
use_tpu: Boolean, whether training and evaluation should run on TPU. Only
used if `use_tpu_estimator` is True.
num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`
is True.
params: Parameter dictionary passed from the estimator. Only used if
`use_tpu_estimator` is True.
override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for
eval_input.
save_final_config: Whether to save final config (obtained after applying
overrides) to `estimator.model_dir`.
postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true,
postprocess is scheduled on the host cpu.
export_to_tpu: When use_tpu and export_to_tpu are true,
`export_savedmodel()` exports a metagraph for serving on TPU besides the
one on CPU.
**kwargs: Additional keyword arguments for configuration override.
Returns:
A dictionary with the following fields:
'estimator': An `Estimator` or `TPUEstimator`.
'train_input_fn': A training input function.
'eval_input_fns': A list of all evaluation input functions.
'eval_input_names': A list of names for each evaluation input.
'eval_on_train_input_fn': An evaluation-on-train input function.
'predict_input_fn': A prediction input function.
'train_steps': Number of training steps. Either directly from input or from
configuration.
"""
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']
create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']
create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']
detection_model_fn_base = MODEL_BUILD_UTIL_MAP['detection_model_fn_base']
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'train_steps': train_steps,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
if sample_1_of_n_eval_examples >= 1:
kwargs.update({'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples})
if override_eval_num_epochs:
kwargs.update({'eval_num_epochs': 1})
tf.logging.warning(
'Forced number of epochs for all eval validations to be 1.')
configs = merge_external_params_with_configs(
configs, hparams, kwargs_dict=kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
eval_config = configs['eval_config']
eval_input_configs = configs['eval_input_configs']
eval_on_train_input_config = copy.deepcopy(train_input_config)
eval_on_train_input_config.sample_1_of_n_examples = (
sample_1_of_n_eval_on_train_examples)
if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:
tf.logging.warning('Expected number of evaluation epochs is 1, but '
'instead encountered `eval_on_train_input_config'
'.num_epochs` = '
'{}. Overwriting `num_epochs` to 1.'.format(
eval_on_train_input_config.num_epochs))
eval_on_train_input_config.num_epochs = 1
  # Update train_steps from config, but only when a non-zero value is provided.
if train_steps is None and train_config.num_steps != 0:
train_steps = train_config.num_steps
detection_model_fn = functools.partial(
detection_model_fn_base, model_config=model_config)
# Create the input functions for TRAIN/EVAL/PREDICT.
train_input_fn = create_train_input_fn(
train_config=train_config,
train_input_config=train_input_config,
model_config=model_config)
eval_input_fns = []
for eval_input_config in eval_input_configs:
eval_input_fns.append(
create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_input_config,
model_config=model_config))
eval_input_names = [
eval_input_config.name for eval_input_config in eval_input_configs
]
eval_on_train_input_fn = create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_on_train_input_config,
model_config=model_config)
predict_input_fn = create_predict_input_fn(
model_config=model_config, predict_input_config=eval_input_configs[0])
# Read export_to_tpu from hparams if not passed.
if export_to_tpu is None and hparams is not None:
export_to_tpu = hparams.get('export_to_tpu', False)
tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s',
use_tpu, export_to_tpu)
model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu,
postprocess_on_cpu)
if use_tpu_estimator:
estimator = tf_estimator.tpu.TPUEstimator(
model_fn=model_fn,
train_batch_size=train_config.batch_size,
# For each core, only batch size 1 is supported for eval.
eval_batch_size=num_shards * 1 if use_tpu else 1,
use_tpu=use_tpu,
config=run_config,
export_to_tpu=export_to_tpu,
eval_on_tpu=False, # Eval runs on CPU, so disable eval on TPU
params=params if params else {})
else:
estimator = tf_estimator.Estimator(model_fn=model_fn, config=run_config)
# Write the as-run pipeline config to disk.
if run_config.is_chief and save_final_config:
pipeline_config_final = create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)
return dict(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fns=eval_input_fns,
eval_input_names=eval_input_names,
eval_on_train_input_fn=eval_on_train_input_fn,
predict_input_fn=predict_input_fn,
train_steps=train_steps)
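# Usage sketch (added illustration; the pipeline path is hypothetical): the
# returned dictionary bundles the estimator with all of its input functions.
def _example_create_estimator_and_inputs(run_config):
  """Illustrative only; not used anywhere in the library."""
  train_and_eval_dict = create_estimator_and_inputs(
      run_config,
      pipeline_config_path='/tmp/pipeline.config',
      save_final_config=True)
  return (train_and_eval_dict['estimator'],
          train_and_eval_dict['train_input_fn'],
          train_and_eval_dict['eval_input_fns'])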
def create_train_and_eval_specs(train_input_fn,
eval_input_fns,
eval_on_train_input_fn,
predict_input_fn,
train_steps,
eval_on_train_data=False,
final_exporter_name='Servo',
eval_spec_names=None):
"""Creates a `TrainSpec` and `EvalSpec`s.
Args:
train_input_fn: Function that produces features and labels on train data.
eval_input_fns: A list of functions that produce features and labels on eval
data.
eval_on_train_input_fn: Function that produces features and labels for
evaluation on train data.
predict_input_fn: Function that produces features for inference.
train_steps: Number of training steps.
eval_on_train_data: Whether to evaluate model on training data. Default is
False.
final_exporter_name: String name given to `FinalExporter`.
eval_spec_names: A list of string names for each `EvalSpec`.
Returns:
Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is
True, the last `EvalSpec` in the list will correspond to training data. The
    remaining `EvalSpec`s in the list correspond to the evaluation datasets.
"""
train_spec = tf_estimator.TrainSpec(
input_fn=train_input_fn, max_steps=train_steps)
if eval_spec_names is None:
eval_spec_names = [str(i) for i in range(len(eval_input_fns))]
eval_specs = []
for index, (eval_spec_name,
eval_input_fn) in enumerate(zip(eval_spec_names, eval_input_fns)):
# Uses final_exporter_name as exporter_name for the first eval spec for
# backward compatibility.
if index == 0:
exporter_name = final_exporter_name
else:
exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name)
exporter = tf_estimator.FinalExporter(
name=exporter_name, serving_input_receiver_fn=predict_input_fn)
eval_specs.append(
tf_estimator.EvalSpec(
name=eval_spec_name,
input_fn=eval_input_fn,
steps=None,
exporters=exporter))
if eval_on_train_data:
eval_specs.append(
tf_estimator.EvalSpec(
name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None))
return train_spec, eval_specs
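# Usage sketch (added illustration): combining the helpers above into the
# train-and-evaluate flow used by model_main.py.
def _example_train_and_evaluate(train_and_eval_dict):
  """Illustrative only; expects the dict from create_estimator_and_inputs."""
  train_spec, eval_specs = create_train_and_eval_specs(
      train_and_eval_dict['train_input_fn'],
      train_and_eval_dict['eval_input_fns'],
      train_and_eval_dict['eval_on_train_input_fn'],
      train_and_eval_dict['predict_input_fn'],
      train_and_eval_dict['train_steps'],
      eval_on_train_data=False)
  tf_estimator.train_and_evaluate(
      train_and_eval_dict['estimator'], train_spec, eval_specs[0])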
def _evaluate_checkpoint(estimator,
input_fn,
checkpoint_path,
name,
max_retries=0):
"""Evaluates a checkpoint.
Args:
estimator: Estimator object to use for evaluation.
input_fn: Input function to use for evaluation.
checkpoint_path: Path of the checkpoint to evaluate.
name: Namescope for eval summary.
max_retries: Maximum number of times to retry the evaluation on encountering
a tf.errors.InvalidArgumentError. If negative, will always retry the
evaluation.
Returns:
Estimator evaluation results.
"""
always_retry = True if max_retries < 0 else False
retries = 0
while always_retry or retries <= max_retries:
try:
return estimator.evaluate(
input_fn=input_fn,
steps=None,
checkpoint_path=checkpoint_path,
name=name)
except tf.errors.InvalidArgumentError as e:
if always_retry or retries < max_retries:
tf.logging.info('Retrying checkpoint evaluation after exception: %s', e)
retries += 1
else:
raise e
def continuous_eval_generator(estimator,
model_dir,
input_fn,
train_steps,
name,
max_retries=0):
"""Perform continuous evaluation on checkpoints written to a model directory.
Args:
estimator: Estimator object to use for evaluation.
model_dir: Model directory to read checkpoints for continuous evaluation.
input_fn: Input function to use for evaluation.
train_steps: Number of training steps. This is used to infer the last
checkpoint and stop evaluation loop.
name: Namescope for eval summary.
max_retries: Maximum number of times to retry the evaluation on encountering
a tf.errors.InvalidArgumentError. If negative, will always retry the
evaluation.
Yields:
Pair of current step and eval_results.
"""
def terminate_eval():
tf.logging.info('Terminating eval after 180 seconds of no checkpoints')
return True
for ckpt in tf.train.checkpoints_iterator(
model_dir, min_interval_secs=180, timeout=None,
timeout_fn=terminate_eval):
tf.logging.info('Starting Evaluation.')
try:
eval_results = _evaluate_checkpoint(
estimator=estimator,
input_fn=input_fn,
checkpoint_path=ckpt,
name=name,
max_retries=max_retries)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
yield (current_step, eval_results)
if current_step >= train_steps:
tf.logging.info(
'Evaluation finished after training step %d' % current_step)
break
except tf.errors.NotFoundError:
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)
def continuous_eval(estimator,
model_dir,
input_fn,
train_steps,
name,
max_retries=0):
"""Performs continuous evaluation on checkpoints written to a model directory.
Args:
estimator: Estimator object to use for evaluation.
model_dir: Model directory to read checkpoints for continuous evaluation.
input_fn: Input function to use for evaluation.
train_steps: Number of training steps. This is used to infer the last
checkpoint and stop evaluation loop.
name: Namescope for eval summary.
max_retries: Maximum number of times to retry the evaluation on encountering
a tf.errors.InvalidArgumentError. If negative, will always retry the
evaluation.
"""
for current_step, eval_results in continuous_eval_generator(
estimator, model_dir, input_fn, train_steps, name, max_retries):
tf.logging.info('Step %s, Eval results: %s', current_step, eval_results)
def populate_experiment(run_config,
hparams,
pipeline_config_path,
train_steps=None,
eval_steps=None,
model_fn_creator=create_model_fn,
**kwargs):
"""Populates an `Experiment` object.
EXPERIMENT CLASS IS DEPRECATED. Please switch to
tf.estimator.train_and_evaluate. As an example, see model_main.py.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
eval_steps: Number of evaluation steps per evaluation cycle. If None, the
number of evaluation steps is set from the `EvalConfig` proto.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns: `model_fn` for `Estimator`.
**kwargs: Additional keyword arguments for configuration override.
Returns:
An `Experiment` that defines all aspects of training, evaluation, and
export.
"""
tf.logging.warning('Experiment is being deprecated. Please use '
'tf.estimator.train_and_evaluate(). See model_main.py for '
'an example.')
train_and_eval_dict = create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps,
eval_steps=eval_steps,
model_fn_creator=model_fn_creator,
save_final_config=True,
**kwargs)
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fns = train_and_eval_dict['eval_input_fns']
predict_input_fn = train_and_eval_dict['predict_input_fn']
train_steps = train_and_eval_dict['train_steps']
export_strategies = [
contrib_learn.utils.saved_model_export_utils.make_export_strategy(
serving_input_fn=predict_input_fn)
]
return contrib_learn.Experiment(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fns[0],
train_steps=train_steps,
eval_steps=None,
export_strategies=export_strategies,
eval_delay_secs=120,
)
| 50,837 | 42.712812 | 80 | py |
models | models-master/research/object_detection/export_tflite_graph_lib_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for export_tflite_graph_lib_tf2.py."""
from __future__ import division
import os
import unittest
import six
import tensorflow.compat.v2 as tf
from object_detection import export_tflite_graph_lib_tf2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
from google.protobuf import text_format
if six.PY2:
import mock # pylint: disable=g-importing-member,g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top
class FakeModel(model.DetectionModel):
def __init__(self):
super(FakeModel, self).__init__(num_classes=2)
self._conv = tf.keras.layers.Conv2D(
filters=1,
kernel_size=1,
strides=(1, 1),
padding='valid',
kernel_initializer=tf.keras.initializers.Constant(value=1.0))
def preprocess(self, inputs):
true_image_shapes = [] # Doesn't matter for the fake model.
return tf.identity(inputs), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
prediction_tensors = {'image': self._conv(preprocessed_inputs)}
with tf.control_dependencies([prediction_tensors['image']]):
prediction_tensors['box_encodings'] = tf.constant(
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]], tf.float32)
prediction_tensors['class_predictions_with_background'] = tf.constant(
[[[0.7, 0.6], [0.9, 0.0]]], tf.float32)
with tf.control_dependencies([
tf.convert_to_tensor(
prediction_tensors['image'].get_shape().as_list()[1:3])
]):
prediction_tensors['anchors'] = tf.constant(
[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32)
return prediction_tensors
def postprocess(self, prediction_dict, true_image_shapes):
predict_tensor_sum = tf.reduce_sum(prediction_dict['image'])
with tf.control_dependencies(list(prediction_dict.values())):
postprocessed_tensors = {
'detection_boxes':
tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]],
tf.float32),
'detection_scores':
predict_tensor_sum +
tf.constant([[0.7, 0.6], [0.9, 0.0]], tf.float32),
'detection_classes':
tf.constant([[0, 1], [1, 0]], tf.float32),
'num_detections':
tf.constant([2, 1], tf.float32),
'detection_keypoints':
tf.zeros([2, 17, 2], tf.float32),
'detection_keypoint_scores':
tf.zeros([2, 17], tf.float32),
}
return postprocessed_tensors
def restore_map(self, checkpoint_path, from_detection_checkpoint):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ExportTfLiteGraphTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_dir):
mock_model = FakeModel()
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def _get_ssd_config(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
pipeline_config.model.ssd.post_processing.batch_non_max_suppression.iou_threshold = 0.5
return pipeline_config
def _get_center_net_config(self):
pipeline_config_text = """
model {
center_net {
num_classes: 1
feature_extractor {
type: "mobilenet_v2_fpn"
}
image_resizer {
fixed_shape_resizer {
height: 10
width: 10
}
}
object_detection_task {
localization_loss {
l1_localization_loss {
}
}
}
object_center_params {
classification_loss {
}
max_box_predictions: 20
}
keypoint_estimation_task {
loss {
localization_loss {
l1_localization_loss {
}
}
classification_loss {
penalty_reduced_logistic_focal_loss {
}
}
}
}
}
}
"""
return text_format.Parse(
pipeline_config_text, pipeline_pb2.TrainEvalPipelineConfig())
# The tf.implements signature is important since it ensures MLIR legalization,
# so we test it here.
def test_postprocess_implements_signature(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
pipeline_config = self._get_ssd_config()
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
detection_model = model_builder.build(
pipeline_config.model, is_training=False)
ckpt = tf.train.Checkpoint(model=detection_model)
manager = tf.train.CheckpointManager(ckpt, tmp_dir, max_to_keep=1)
ckpt.restore(manager.latest_checkpoint).expect_partial()
# The module helps build a TF graph appropriate for TFLite conversion.
detection_module = export_tflite_graph_lib_tf2.SSDModule(
pipeline_config=pipeline_config,
detection_model=detection_model,
max_detections=20,
use_regular_nms=True)
expected_signature = ('name: "TFLite_Detection_PostProcess" attr { key: '
'"max_detections" value { i: 20 } } attr { key: '
'"max_classes_per_detection" value { i: 1 } } attr '
'{ key: "use_regular_nms" value { b: true } } attr '
'{ key: "nms_score_threshold" value { f: 0.000000 }'
' } attr { key: "nms_iou_threshold" value { f: '
'0.500000 } } attr { key: "y_scale" value { f: '
'10.000000 } } attr { key: "x_scale" value { f: '
'10.000000 } } attr { key: "h_scale" value { f: '
'5.000000 } } attr { key: "w_scale" value { f: '
'5.000000 } } attr { key: "num_classes" value { i: '
'2 } }')
self.assertEqual(expected_signature,
detection_module.postprocess_implements_signature())
def test_unsupported_architecture(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
expected_message = 'Only ssd or center_net models are supported in tflite'
try:
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
max_detections=10,
use_regular_nms=False)
except ValueError as e:
if expected_message not in str(e):
raise
else:
raise AssertionError('Exception not raised: %s' % expected_message)
def test_export_yields_saved_model(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config=self._get_ssd_config(),
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
max_detections=10,
use_regular_nms=False)
self.assertTrue(
os.path.exists(
os.path.join(output_directory, 'saved_model', 'saved_model.pb')))
self.assertTrue(
os.path.exists(
os.path.join(output_directory, 'saved_model', 'variables',
'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(output_directory, 'saved_model', 'variables',
'variables.data-00000-of-00001')))
def test_exported_model_inference(self):
tmp_dir = self.get_temp_dir()
output_directory = os.path.join(tmp_dir, 'output')
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config=self._get_ssd_config(),
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
max_detections=10,
use_regular_nms=False)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
detect_fn_sig = detect_fn.signatures['serving_default']
image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
detections = detect_fn_sig(image)
# The exported graph doesn't have numerically correct outputs, but there
# should be 4.
self.assertEqual(4, len(detections))
def test_center_net_inference_object_detection(self):
tmp_dir = self.get_temp_dir()
output_directory = os.path.join(tmp_dir, 'output')
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config=self._get_center_net_config(),
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
max_detections=10,
use_regular_nms=False)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
detect_fn_sig = detect_fn.signatures['serving_default']
image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
detections = detect_fn_sig(image)
# The exported graph doesn't have numerically correct outputs, but there
# should be 4.
self.assertEqual(4, len(detections))
def test_center_net_inference_keypoint(self):
tmp_dir = self.get_temp_dir()
output_directory = os.path.join(tmp_dir, 'output')
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config=self._get_center_net_config(),
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
max_detections=10,
use_regular_nms=False,
include_keypoints=True)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
detect_fn_sig = detect_fn.signatures['serving_default']
image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
detections = detect_fn_sig(image)
# The exported graph doesn't have numerically correct outputs, but there
# should be 6 (4 for boxes, 2 for keypoints).
self.assertEqual(6, len(detections))
if __name__ == '__main__':
tf.test.main()
| 13,194 | 37.581871 | 91 | py |
models | models-master/research/object_detection/core/target_assigner.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import densepose_ops
from object_detection.core import keypoint_ops
from object_detection.core import matcher as mat
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import standard_fields as fields
from object_detection.matchers import argmax_matcher
from object_detection.matchers import hungarian_matcher
from object_detection.utils import shape_utils
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import tf_version
if tf_version.is_tf1():
from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top
ResizeMethod = tf2.image.ResizeMethod
_DEFAULT_KEYPOINT_OFFSET_STD_DEV = 1.0
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self,
similarity_calc,
matcher,
box_coder_instance,
negative_class_weight=1.0):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: an object_detection.core.Matcher used to match groundtruth to
anchors.
box_coder_instance: an object_detection.core.BoxCoder used to encode
matching groundtruth boxes with respect to anchors.
      negative_class_weight: classification weight to be associated with negative
anchors (default: 1.0). The weight must be in [0., 1.].
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):
raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
if not isinstance(matcher, mat.Matcher):
raise ValueError('matcher must be a Matcher')
if not isinstance(box_coder_instance, box_coder.BoxCoder):
raise ValueError('box_coder must be a BoxCoder')
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder_instance
self._negative_class_weight = negative_class_weight
@property
def box_coder(self):
return self._box_coder
# TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields.
def assign(self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
unmatched_class_label=None,
groundtruth_weights=None):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
    Anchors that are not matched to anything are given the classification
    target specified by the `unmatched_class_label` argument (a zero vector by
    default).
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors that match a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1. Generally no
groundtruth boxes with zero weight match to any anchors as matchers are
aware of groundtruth weights. Additionally, `cls_weights` and
`reg_weights` are calculated using groundtruth weights as an added
safety.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: an int32 tensor of shape [num_anchors] containing result of anchor
groundtruth matching. Each position in the tensor indicates an anchor
and holds the following meaning:
(1) if match[i] >= 0, anchor i is matched with groundtruth match[i].
        (2) if match[i]=-1, anchor i is marked as background.
(3) if match[i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
    if not isinstance(anchors, box_list.BoxList):
      raise ValueError('anchors must be a BoxList')
    if not isinstance(groundtruth_boxes, box_list.BoxList):
      raise ValueError('groundtruth_boxes must be a BoxList')
if unmatched_class_label is None:
unmatched_class_label = tf.constant([0], tf.float32)
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(unmatched_class_label))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
# set scores on the gt boxes
scores = 1 - groundtruth_labels[:, 0]
groundtruth_boxes.add_field(fields.BoxListFields.scores, scores)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(groundtruth_weights, 0))
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
unmatched_class_label,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
# convert cls_weights from per-anchor to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_shape = tf.shape(cls_weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), class_label_shape],
axis=0)
for _ in range(len(cls_targets.get_shape()[1:])):
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return (cls_targets, cls_weights, reg_targets, reg_weights,
match.match_results)
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(fields.BoxListFields.keypoints):
groundtruth_keypoints = groundtruth_boxes.get_field(
fields.BoxListFields.keypoints)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(fields.BoxListFields.keypoints,
matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels,
unmatched_class_label, match):
"""Create classification targets for each anchor.
    Assign a classification target for each anchor to the matching
    groundtruth label that is provided by match. Anchors that are not matched
    to anything are given the target unmatched_class_label.
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors that match a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
    Positive (matched) anchors are associated with the weight of the
    groundtruth box they match (typically 1.0) and negative (unmatched) anchors
    are associated with a weight of negative_class_weight. When anchors are
    ignored, weights are set to zero. By default, negative_class_weight is set
    to 1.0, but it can be adjusted to handle class imbalance (which is almost
    always the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors that match a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
# TODO(rathodv): This method pulls in all the implementation dependencies into
# core. Therefore it's best to have this factory method outside of core.
def create_target_assigner(reference, stage=None,
negative_class_weight=1.0, use_matmul_gather=False):
"""Factory function for creating standard target assigners.
Args:
reference: string referencing the type of TargetAssigner.
stage: string denoting stage: {proposal, detection}.
    negative_class_weight: classification weight to be associated with negative
anchors (default: 1.0)
use_matmul_gather: whether to use matrix multiplication based gather which
are better suited for TPUs.
Returns:
TargetAssigner: desired target assigner.
Raises:
ValueError: if combination reference+stage is invalid.
"""
if reference == 'Multibox' and stage == 'proposal':
if tf_version.is_tf2():
raise ValueError('GreedyBipartiteMatcher is not supported in TF 2.X.')
similarity_calc = sim_calc.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder_instance = mean_stddev_box_coder.MeanStddevBoxCoder()
elif reference == 'FasterRCNN' and stage == 'proposal':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7,
unmatched_threshold=0.3,
force_match_for_each_row=True,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FasterRCNN' and stage == 'detection':
similarity_calc = sim_calc.IouSimilarity()
# Uses all proposals with IOU < 0.5 as candidate negatives.
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
negatives_lower_than_unmatched=True,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FastRCNN':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.1,
force_match_for_each_row=False,
negatives_lower_than_unmatched=False,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder()
else:
raise ValueError('No valid combination of reference and stage.')
return TargetAssigner(similarity_calc, matcher, box_coder_instance,
negative_class_weight=negative_class_weight)
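# Illustrative usage sketch (not part of the original library): building a
# Faster R-CNN detection-stage assigner and assigning targets for one image.
# The anchor/groundtruth coordinates and the 3-column one-hot labels
# (background in column 0) are made-up values.
def _example_single_image_assignment():
  """Sketch: assigns targets for two anchors and one groundtruth box."""
  anchors = box_list.BoxList(
      tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32))
  gt_boxes = box_list.BoxList(
      tf.constant([[0.1, 0.1, 0.6, 0.6]], tf.float32))
  gt_labels = tf.constant([[0.0, 1.0, 0.0]], tf.float32)
  assigner = create_target_assigner('FasterRCNN', 'detection')
  return assigner.assign(
      anchors, gt_boxes, gt_labels,
      unmatched_class_label=tf.constant([1.0, 0.0, 0.0], tf.float32))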
def batch_assign(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_targets_batch,
unmatched_class_label=None,
gt_weights_batch=None):
"""Batched assignment of classification and regression targets.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors] containing result
of anchor groundtruth matching. Each position in the tensor indicates an
anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].
      (2) if match[x, i]=-1, anchor i is marked as background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_targets_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_targets_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
for anchors, gt_boxes, gt_class_targets, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch):
(cls_targets, cls_weights,
reg_targets, reg_weights, match) = target_assigner.assign(
anchors, gt_boxes, gt_class_targets, unmatched_class_label,
gt_weights)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
batch_match = tf.stack(match_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match)
# Assign an alias to avoid large refactor of existing users.
batch_assign_targets = batch_assign
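# Illustrative usage sketch (not part of the original library): batched
# assignment for a two-image batch that shares a single anchor set. The
# groundtruth boxes and 3-column one-hot labels are made-up values; the
# target assigner and anchors are assumed to be provided by the caller.
def _example_batch_assignment(target_assigner_instance, anchors):
  """Sketch: assigns targets for a batch of two images."""
  gt_box_batch = [
      box_list.BoxList(tf.constant([[0.1, 0.1, 0.6, 0.6]], tf.float32)),
      box_list.BoxList(tf.constant([[0.4, 0.4, 0.9, 0.9]], tf.float32)),
  ]
  gt_class_targets_batch = [
      tf.constant([[0.0, 1.0, 0.0]], tf.float32),
      tf.constant([[0.0, 0.0, 1.0]], tf.float32),
  ]
  return batch_assign(
      target_assigner_instance,
      anchors,
      gt_box_batch,
      gt_class_targets_batch,
      unmatched_class_label=tf.constant([1.0, 0.0, 0.0], tf.float32))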
def batch_get_targets(batch_match, groundtruth_tensor_list,
groundtruth_weights_list, unmatched_value,
unmatched_weight):
"""Returns targets based on anchor-groundtruth box matching results.
Args:
batch_match: An int32 tensor of shape [batch, num_anchors] containing the
result of target assignment returned by TargetAssigner.assign(..).
groundtruth_tensor_list: A list of groundtruth tensors of shape
[num_groundtruth, d_1, d_2, ..., d_k]. The tensors can be of any type.
groundtruth_weights_list: A list of weights, one per groundtruth tensor, of
shape [num_groundtruth].
unmatched_value: A tensor of shape [d_1, d_2, ..., d_k] of the same type as
groundtruth tensor containing target value for anchors that remain
unmatched.
unmatched_weight: Scalar weight to assign to anchors that remain unmatched.
Returns:
targets: A tensor of shape [batch, num_anchors, d_1, d_2, ..., d_k]
containing targets for anchors.
weights: A float tensor of shape [batch, num_anchors] containing the weights
to assign to each target.
"""
match_list = tf.unstack(batch_match)
targets_list = []
weights_list = []
for match_tensor, groundtruth_tensor, groundtruth_weight in zip(
match_list, groundtruth_tensor_list, groundtruth_weights_list):
match_object = mat.Match(match_tensor)
targets = match_object.gather_based_on_match(
groundtruth_tensor,
unmatched_value=unmatched_value,
ignored_value=unmatched_value)
targets_list.append(targets)
weights = match_object.gather_based_on_match(
groundtruth_weight,
unmatched_value=unmatched_weight,
ignored_value=tf.zeros_like(unmatched_weight))
weights_list.append(weights)
return tf.stack(targets_list), tf.stack(weights_list)
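# Illustrative usage sketch (not part of the original library): re-using the
# match results from batch_assign to pull a per-box scalar attribute (e.g. a
# "difficulty" flag) onto the anchor grid for a batch of one image. The
# attribute values and weights are made up.
def _example_batch_get_targets(batch_match):
  """Sketch: gathers a scalar per-groundtruth attribute onto anchors."""
  groundtruth_tensor_list = [tf.constant([1.0, 0.0], tf.float32)]
  groundtruth_weights_list = [tf.constant([1.0, 1.0], tf.float32)]
  return batch_get_targets(
      batch_match,
      groundtruth_tensor_list,
      groundtruth_weights_list,
      unmatched_value=tf.constant(0.0),
      unmatched_weight=tf.constant(1.0))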
def batch_assign_confidences(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_confidences_batch,
gt_weights_batch=None,
unmatched_class_label=None,
include_background_class=True,
implicit_class_weight=1.0):
"""Batched assignment of classification and regression targets.
  The differences between batch_assign_confidences and batch_assign_targets
  are:
   - 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and
     tensor (high-dimensional) targets. 'batch_assign_confidences' only
     supports scalar (agnostic) and vector (multiclass) targets.
- 'batch_assign_targets' assumes the input class tensor using the binary
one/K-hot encoding. 'batch_assign_confidences' takes the class confidence
scores as the input, where 1 means positive classes, 0 means implicit
negative classes, and -1 means explicit negative classes.
   - 'batch_assign_confidences' assigns the targets in a similar way to
     'batch_assign_targets' except that it gives different weights to implicit
     and explicit classes. This allows the user to control how strongly
     negative gradients are pushed for implicit and explicit examples during
     training.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_confidences_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch. Note that in this tensor, 1 means explicit positive class,
-1 means explicit negative class, and 0 means implicit negative class.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_gt_boxes_i] containing weights for groundtruth boxes.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
include_background_class: whether or not gt_class_confidences_batch includes
the background class.
implicit_class_weight: the weight assigned to implicit examples.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors] containing result
of anchor groundtruth matching. Each position in the tensor indicates an
anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].
      (2) if match[x, i]=-1, anchor i is marked as background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList, or if any element in gt_class_confidences_batch has rank > 2.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_confidences_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_confidences_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_confidences_batch)
for anchors, gt_boxes, gt_class_confidences, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_confidences_batch,
gt_weights_batch):
if (gt_class_confidences is not None and
len(gt_class_confidences.get_shape().as_list()) > 2):
raise ValueError('The shape of the class target is not supported. ',
gt_class_confidences.get_shape())
cls_targets, _, reg_targets, _, match = target_assigner.assign(
anchors, gt_boxes, gt_class_confidences, unmatched_class_label,
groundtruth_weights=gt_weights)
if include_background_class:
cls_targets_without_background = tf.slice(
cls_targets, [0, 1], [-1, -1])
else:
cls_targets_without_background = cls_targets
positive_mask = tf.greater(cls_targets_without_background, 0.0)
negative_mask = tf.less(cls_targets_without_background, 0.0)
explicit_example_mask = tf.logical_or(positive_mask, negative_mask)
positive_anchors = tf.reduce_any(positive_mask, axis=-1)
regression_weights = tf.cast(positive_anchors, dtype=tf.float32)
regression_targets = (
reg_targets * tf.expand_dims(regression_weights, axis=-1))
regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1)
cls_targets_without_background = (
cls_targets_without_background *
(1 - tf.cast(negative_mask, dtype=tf.float32)))
cls_weights_without_background = ((1 - implicit_class_weight) * tf.cast(
explicit_example_mask, dtype=tf.float32) + implicit_class_weight)
if include_background_class:
cls_weights_background = (
(1 - implicit_class_weight) * regression_weights_expanded
+ implicit_class_weight)
classification_weights = tf.concat(
[cls_weights_background, cls_weights_without_background], axis=-1)
cls_targets_background = 1 - regression_weights_expanded
classification_targets = tf.concat(
[cls_targets_background, cls_targets_without_background], axis=-1)
else:
classification_targets = cls_targets_without_background
classification_weights = cls_weights_without_background
cls_targets_list.append(classification_targets)
cls_weights_list.append(classification_weights)
reg_targets_list.append(regression_targets)
reg_weights_list.append(regression_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
batch_match = tf.stack(match_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match)
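# Illustrative usage sketch (not part of the original library): the confidence
# encoding consumed by batch_assign_confidences for one groundtruth box and
# three non-background classes, where class 0 is an explicit positive, class 1
# an explicit negative and class 2 an implicit negative. The assigner, anchors
# and implicit weight of 0.5 are assumptions of this sketch.
def _example_confidence_assignment(target_assigner_instance, anchors):
  """Sketch: assigns targets from 1/-1/0 class confidences."""
  gt_box_batch = [
      box_list.BoxList(tf.constant([[0.1, 0.1, 0.6, 0.6]], tf.float32))]
  # Columns: [background, class_0, class_1, class_2].
  gt_class_confidences_batch = [
      tf.constant([[0.0, 1.0, -1.0, 0.0]], tf.float32)]
  return batch_assign_confidences(
      target_assigner_instance,
      anchors,
      gt_box_batch,
      gt_class_confidences_batch,
      unmatched_class_label=tf.constant([1.0, 0.0, 0.0, 0.0], tf.float32),
      include_background_class=True,
      implicit_class_weight=0.5)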
def _smallest_positive_root(a, b, c):
"""Returns the smallest positive root of a quadratic equation."""
discriminant = tf.sqrt(b ** 2 - 4 * a * c)
# TODO(vighneshb) We are currently using the slightly incorrect
# CenterNet implementation. The commented lines implement the fixed version
# in https://github.com/princeton-vl/CornerNet. Change the implementation
# after verifying it has no negative impact.
# root1 = (-b - discriminant) / (2 * a)
# root2 = (-b + discriminant) / (2 * a)
# return tf.where(tf.less(root1, 0), root2, root1)
return (-b + discriminant) / (2.0)
def max_distance_for_overlap(height, width, min_iou):
"""Computes how far apart bbox corners can lie while maintaining the iou.
Given a bounding box size, this function returns a lower bound on how far
apart the corners of another box can lie while still maintaining the given
IoU. The implementation is based on the `gaussian_radius` function in the
Objects as Points github repo: https://github.com/xingyizhou/CenterNet
Args:
height: A 1-D float Tensor representing height of the ground truth boxes.
width: A 1-D float Tensor representing width of the ground truth boxes.
min_iou: A float representing the minimum IoU desired.
Returns:
distance: A 1-D Tensor of distances, of the same length as the input
height and width tensors.
"""
# Given that the detected box is displaced at a distance `d`, the exact
# IoU value will depend on the angle at which each corner is displaced.
# We simplify our computation by assuming that each corner is displaced by
# a distance `d` in both x and y direction. This gives us a lower IoU than
# what is actually realizable and ensures that any box with corners less
# than `d` distance apart will always have an IoU greater than or equal
# to `min_iou`
# The following 3 cases can be worked on geometrically and come down to
# solving a quadratic inequality. In each case, to ensure `min_iou` we use
# the smallest positive root of the equation.
# Case where detected box is offset from ground truth and no box completely
# contains the other.
distance_detection_offset = _smallest_positive_root(
a=1, b=-(height + width),
c=width * height * ((1 - min_iou) / (1 + min_iou))
)
# Case where detection is smaller than ground truth and completely contained
# in it.
distance_detection_in_gt = _smallest_positive_root(
a=4, b=-2 * (height + width),
c=(1 - min_iou) * width * height
)
# Case where ground truth is smaller than detection and completely contained
# in it.
distance_gt_in_detection = _smallest_positive_root(
a=4 * min_iou, b=(2 * min_iou) * (width + height),
c=(min_iou - 1) * width * height
)
return tf.reduce_min([distance_detection_offset,
distance_gt_in_detection,
distance_detection_in_gt], axis=0)
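# Illustrative sketch (not part of the original library): the maximum corner
# displacement that keeps IoU >= 0.7 for a few hypothetical box sizes, in
# output-pixel units. Larger boxes tolerate a larger displacement.
def _example_gaussian_radius():
  """Sketch: radii for 16x16, 64x64 and 128x128 boxes at min_iou=0.7."""
  heights = tf.constant([16.0, 64.0, 128.0])
  widths = tf.constant([16.0, 64.0, 128.0])
  return max_distance_for_overlap(heights, widths, min_iou=0.7)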
def get_batch_predictions_from_indices(batch_predictions, indices):
"""Gets the values of predictions in a batch at the given indices.
The indices are expected to come from the offset targets generation functions
in this library. The returned value is intended to be used inside a loss
function.
Args:
batch_predictions: A tensor of shape [batch_size, height, width, channels]
or [batch_size, height, width, class, channels] for class-specific
features (e.g. keypoint joint offsets).
indices: A tensor of shape [num_instances, 3] for single class features or
[num_instances, 4] for multiple classes features.
Returns:
values: A tensor of shape [num_instances, channels] holding the predicted
values at the given indices.
"""
# Note, gather_nd (and its gradient scatter_nd) runs significantly slower (on
# TPU) than gather with flattened inputs, so reshape the tensor, flatten the
# indices, and run gather.
shape = shape_utils.combined_static_and_dynamic_shape(batch_predictions)
# [B, H, W, C] -> [H*W, W, 1] or [B, H, W, N, C] -> [H*W*N, W*N, N, 1]
rev_cum_interior_indices = tf.reverse(tf.math.cumprod(shape[-2:0:-1]), [0])
rev_cum_interior_indices = tf.concat([rev_cum_interior_indices, [1]], axis=0)
# Compute flattened indices and gather.
flattened_inds = tf.linalg.matmul(
indices, rev_cum_interior_indices[:, tf.newaxis])[:, 0]
batch_predictions_2d = tf.reshape(batch_predictions, [-1, shape[-1]])
return tf.gather(batch_predictions_2d, flattened_inds, axis=0)
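# Illustrative sketch (not part of the original library): gathering per-pixel
# channel vectors at (batch, y, x) indices from a tiny 1x2x2x3 feature map.
# With the values below, the flattened index for [0, 1, 0] is
# 0 * (2 * 2) + 1 * 2 + 0 = 2, so the returned row is [6., 7., 8.].
def _example_gather_at_indices():
  """Sketch: gathers one channel vector from a toy prediction tensor."""
  batch_predictions = tf.reshape(tf.range(12, dtype=tf.float32), [1, 2, 2, 3])
  indices = tf.constant([[0, 1, 0]], dtype=tf.int32)
  return get_batch_predictions_from_indices(batch_predictions, indices)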
def _compute_std_dev_from_box_size(boxes_height, boxes_width, min_overlap):
"""Computes the standard deviation of the Gaussian kernel from box size.
Args:
boxes_height: A 1D tensor with shape [num_instances] representing the height
of each box.
boxes_width: A 1D tensor with shape [num_instances] representing the width
of each box.
min_overlap: The minimum IOU overlap that boxes need to have to not be
penalized.
Returns:
A 1D tensor with shape [num_instances] representing the computed Gaussian
sigma for each of the box.
"""
# We are dividing by 3 so that points closer than the computed
# distance have a >99% CDF.
sigma = max_distance_for_overlap(boxes_height, boxes_width, min_overlap)
sigma = (2 * tf.math.maximum(tf.math.floor(sigma), 0.0) + 1) / 6.0
return sigma
def _preprocess_keypoints_and_weights(out_height, out_width, keypoints,
class_onehot, class_weights,
keypoint_weights, class_id,
keypoint_indices):
"""Preprocesses the keypoints and the corresponding keypoint weights.
This function performs several common steps to preprocess the keypoints and
keypoint weights features, including:
1) Select the subset of keypoints based on the keypoint indices, fill the
keypoint NaN values with zeros and convert to absolute coordinates.
2) Generate the weights of the keypoint using the following information:
a. The class of the instance.
b. The NaN value of the keypoint coordinates.
c. The provided keypoint weights.
Args:
out_height: An integer or an integer tensor indicating the output height
of the model.
out_width: An integer or an integer tensor indicating the output width of
the model.
keypoints: A float tensor of shape [num_instances, num_total_keypoints, 2]
      representing the original keypoint groundtruth coordinates.
class_onehot: A float tensor of shape [num_instances, num_classes]
containing the class targets with the 0th index assumed to map to the
first non-background class.
class_weights: A float tensor of shape [num_instances] containing weights
for groundtruth instances.
keypoint_weights: A float tensor of shape
[num_instances, num_total_keypoints] representing the weights of each
keypoints.
class_id: int, the ID of the class (0-indexed) that contains the target
keypoints to consider in this task.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints that should be considered in this task.
Returns:
A tuple of two tensors:
keypoint_absolute: A float tensor of shape
[num_instances, num_keypoints, 2] which is the selected and updated
keypoint coordinates.
keypoint_weights: A float tensor of shape [num_instances, num_keypoints]
representing the updated weight of each keypoint.
"""
# Select the targets keypoints by their type ids and generate the mask
# of valid elements.
valid_mask, keypoints = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoints,
class_id=class_id,
class_onehot=class_onehot,
class_weights=class_weights,
keypoint_indices=keypoint_indices)
# Keypoint coordinates in absolute coordinate system.
# The shape of the tensors: [num_instances, num_keypoints, 2].
keypoints_absolute = keypoint_ops.to_absolute_coordinates(
keypoints, out_height, out_width)
# Assign default weights for the keypoints.
if keypoint_weights is None:
keypoint_weights = tf.ones_like(keypoints[:, :, 0])
else:
keypoint_weights = tf.gather(
keypoint_weights, indices=keypoint_indices, axis=1)
keypoint_weights = keypoint_weights * valid_mask
return keypoints_absolute, keypoint_weights
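# Illustrative sketch (not part of the original library): selecting keypoints
# 0 and 2 of a hypothetical 3-keypoint schema for class id 0 on a 64x64
# output grid. The NaN coordinates mark a missing keypoint, which receives a
# zero weight.
def _example_preprocess_keypoints():
  """Sketch: preprocesses keypoints for a single instance of class 0."""
  keypoints = tf.constant(
      [[[0.1, 0.2], [float('nan'), float('nan')], [0.3, 0.4]]], tf.float32)
  class_onehot = tf.constant([[1.0, 0.0]], tf.float32)
  class_weights = tf.constant([1.0], tf.float32)
  return _preprocess_keypoints_and_weights(
      out_height=64,
      out_width=64,
      keypoints=keypoints,
      class_onehot=class_onehot,
      class_weights=class_weights,
      keypoint_weights=None,
      class_id=0,
      keypoint_indices=[0, 2])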
class CenterNetCenterHeatmapTargetAssigner(object):
"""Wrapper to compute the object center heatmap."""
def __init__(self,
stride,
min_overlap=0.7,
compute_heatmap_sparse=False,
keypoint_class_id=None,
keypoint_indices=None,
keypoint_weights_for_center=None,
box_heatmap_type='adaptive_gaussian',
heatmap_exponent=1.0):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
min_overlap: The minimum IOU overlap that boxes need to have to not be
penalized.
compute_heatmap_sparse: bool, indicating whether or not to use the sparse
version of the Op that computes the heatmap. The sparse version scales
better with number of classes, but in some cases is known to cause
OOM error. See (b/170989061).
keypoint_class_id: int, the ID of the class (0-indexed) that contains the
target keypoints to consider in this task.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
keypoint_weights_for_center: The keypoint weights used for calculating the
location of object center. The number of weights need to be the same as
the number of keypoints. The object center is calculated by the weighted
mean of the keypoint locations. If not provided, the object center is
determined by the center of the bounding box (default behavior).
box_heatmap_type: str, the algorithm used to compute the box heatmap,
used when calling the assign_center_targets_from_boxes method.
Options are:
        'adaptive_gaussian': A box-size adaptive Gaussian from the original
          CenterNet ("Objects as Points") paper.
'iou': IOU based heatmap target where each point is assigned an IOU
based on its location, assuming that it produced a box centered at
that point with the correct size.
heatmap_exponent: float, The generated heatmap is exponentiated with
        this number. A number > 1 will result in the heatmap being more peaked
        and a number < 1 will cause the heatmap to be more spread out.
"""
self._stride = stride
self._min_overlap = min_overlap
self._compute_heatmap_sparse = compute_heatmap_sparse
self._keypoint_class_id = keypoint_class_id
self._keypoint_indices = keypoint_indices
self._keypoint_weights_for_center = keypoint_weights_for_center
self._box_heatmap_type = box_heatmap_type
self._heatmap_exponent = heatmap_exponent
def assign_center_targets_from_boxes(self,
height,
width,
gt_boxes_list,
gt_classes_list,
gt_weights_list=None,
maximum_normalized_coordinate=1.1):
"""Computes the object center heatmap target.
Args:
height: int, height of input to the model. This is used to
determine the height of the output.
width: int, width of the input to the model. This is used to
determine the width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The box coordinates are expected in normalized coordinates.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
gt_weights_list: A list of float tensors with shape [num_boxes]
representing the weight of each groundtruth detection box.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1. This is used to check bounds during
converting normalized coordinates to absolute coordinates.
Returns:
heatmap: A Tensor of size [batch_size, output_height, output_width,
num_classes] representing the per class center heatmap. output_height
and output_width are computed by dividing the input height and width by
the stride specified during initialization.
"""
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
(y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width)
heatmaps = []
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
# TODO(vighneshb) Replace the for loop with a batch version.
for boxes, class_targets, weights in zip(gt_boxes_list, gt_classes_list,
gt_weights_list):
boxes = box_list.BoxList(boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1),
maximum_normalized_coordinate=maximum_normalized_coordinate)
# Get the box center coordinates. Each returned tensors have the shape of
# [num_instances]
(y_center, x_center, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
# Compute the sigma from box size. The tensor shape: [num_instances].
sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width,
self._min_overlap)
# Apply the Gaussian kernel to the center coordinates. Returned heatmap
# has shape of [out_height, out_width, num_classes]
if self._box_heatmap_type == 'adaptive_gaussian':
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=y_center,
x_coordinates=x_center,
sigma=sigma,
channel_onehot=class_targets,
channel_weights=weights,
sparse=self._compute_heatmap_sparse)
elif self._box_heatmap_type == 'iou':
heatmap = ta_utils.coordinates_to_iou(y_grid, x_grid, boxes,
class_targets, weights)
else:
raise ValueError(f'Unknown heatmap type - {self._box_heatmap_type}')
heatmap = tf.stop_gradient(heatmap)
heatmaps.append(heatmap)
# Return the stacked heatmaps over the batch.
stacked_heatmaps = tf.stack(heatmaps, axis=0)
return (tf.pow(stacked_heatmaps, self._heatmap_exponent) if
self._heatmap_exponent != 1.0 else stacked_heatmaps)
def assign_center_targets_from_keypoints(self,
height,
width,
gt_classes_list,
gt_keypoints_list,
gt_weights_list=None,
gt_keypoints_weights_list=None):
"""Computes the object center heatmap target using keypoint locations.
Args:
height: int, height of input to the model. This is used to
determine the height of the output.
width: int, width of the input to the model. This is used to
determine the width of the output.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
      gt_keypoints_list: A list of float tensors with shape
        [num_instances, num_total_keypoints, 2] representing the groundtruth
        keypoint coordinates for each sample in the batch, in normalized
        coordinates.
      gt_weights_list: A list of float tensors with shape [num_instances]
        representing the weight of each groundtruth instance.
      gt_keypoints_weights_list: [Optional] a list of 2D tf.float32 tensors of
        shape [num_instances, num_total_keypoints] representing the weights of
        each keypoint. If not provided, then all non-NaN keypoints will be
        equally weighted.
Returns:
heatmap: A Tensor of size [batch_size, output_height, output_width,
num_classes] representing the per class center heatmap. output_height
and output_width are computed by dividing the input height and width by
the stride specified during initialization.
"""
assert (self._keypoint_weights_for_center is not None and
self._keypoint_class_id is not None and
self._keypoint_indices is not None)
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
(y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width)
heatmaps = []
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
for keypoints, classes, kp_weights, weights in zip(
gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=out_height,
out_width=out_width,
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._keypoint_class_id,
keypoint_indices=self._keypoint_indices)
# _, num_keypoints, _ = (
# shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# Update the keypoint weights by the specified keypoints weights.
kp_loc_weights = tf.constant(
self._keypoint_weights_for_center, dtype=tf.float32)
updated_kp_weights = kp_weights * kp_loc_weights[tf.newaxis, :]
# Obtain the sum of the weights for each instance.
# instance_weight_sum has shape: [num_instance].
instance_weight_sum = tf.reduce_sum(updated_kp_weights, axis=1)
# Weight the keypoint coordinates by updated_kp_weights.
# weighted_keypoints has shape: [num_instance, num_keypoints, 2]
weighted_keypoints = keypoints_absolute * tf.expand_dims(
updated_kp_weights, axis=2)
# Compute the mean of the keypoint coordinates over the weighted
# keypoints.
# keypoint_mean has shape: [num_instance, 2]
keypoint_mean = tf.math.divide(
tf.reduce_sum(weighted_keypoints, axis=1),
tf.expand_dims(instance_weight_sum, axis=-1))
# Replace the NaN values (due to divided by zeros in the above operation)
# by 0.0 where the sum of instance weight is zero.
# keypoint_mean has shape: [num_instance, 2]
keypoint_mean = tf.where(
tf.stack([instance_weight_sum, instance_weight_sum], axis=1) > 0.0,
keypoint_mean, tf.zeros_like(keypoint_mean))
# Compute the distance from each keypoint to the mean location using
# broadcasting and weighted by updated_kp_weights.
# keypoint_dist has shape: [num_instance, num_keypoints]
keypoint_mean = tf.expand_dims(keypoint_mean, axis=1)
keypoint_dist = tf.math.sqrt(
tf.reduce_sum(
tf.math.square(keypoints_absolute - keypoint_mean), axis=2))
keypoint_dist = keypoint_dist * updated_kp_weights
# Compute the average of the distances from each keypoint to the mean
# location and update the average value by zero when the instance weight
# is zero.
# avg_radius has shape: [num_instance]
avg_radius = tf.math.divide(
tf.reduce_sum(keypoint_dist, axis=1), instance_weight_sum)
avg_radius = tf.where(
instance_weight_sum > 0.0, avg_radius, tf.zeros_like(avg_radius))
# Update the class instance weight. If the instance doesn't contain enough
# valid keypoint values (i.e. instance_weight_sum == 0.0), then set the
# instance weight to zero.
# updated_class_weights has shape: [num_instance]
updated_class_weights = tf.where(
instance_weight_sum > 0.0, weights, tf.zeros_like(weights))
      # Compute the sigma from average distance. We use 2 * average distance
      # to approximate the width/height of the bounding box.
# sigma has shape: [num_instances].
sigma = _compute_std_dev_from_box_size(2 * avg_radius, 2 * avg_radius,
self._min_overlap)
# Apply the Gaussian kernel to the center coordinates. Returned heatmap
# has shape of [out_height, out_width, num_classes]
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=keypoint_mean[:, 0, 0],
x_coordinates=keypoint_mean[:, 0, 1],
sigma=sigma,
channel_onehot=classes,
channel_weights=updated_class_weights,
sparse=self._compute_heatmap_sparse)
heatmaps.append(heatmap)
# Return the stacked heatmaps over the batch.
return tf.stack(heatmaps, axis=0)
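  # Illustrative usage sketch (assumptions: an already-constructed assigner
  # from this file with stride 4 and keypoint-based center options set; the
  # keypoint/class values below are arbitrary):
  #
  #   heatmap = assigner.assign_center_targets_from_keypoints(
  #       height=128, width=128,
  #       gt_classes_list=[tf.one_hot([0], depth=2)],
  #       gt_keypoints_list=[tf.constant([[[0.4, 0.4], [0.6, 0.6]]],
  #                                      tf.float32)])
  #   # heatmap shape: [1, 32, 32, 2] (batch, out_height, out_width, classes).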
class CenterNetBoxTargetAssigner(object):
"""Wrapper to compute target tensors for the object detection task.
This class has methods that take as input a batch of ground truth tensors
(in the form of a list) and return the targets required to train the object
detection task.
"""
def __init__(self, stride):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
"""
self._stride = stride
def assign_size_and_offset_targets(self,
height,
width,
gt_boxes_list,
gt_weights_list=None,
maximum_normalized_coordinate=1.1):
"""Returns the box height/width and center offset targets and their indices.
The returned values are expected to be used with predicted tensors
of size (batch_size, height//self._stride, width//self._stride, 2). The
predicted values at the relevant indices can be retrieved with the
get_batch_predictions_from_indices function.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_weights_list: A list of tensors with shape [num_boxes] corresponding to
the weight of each groundtruth detection box.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1. This is used to check bounds during
converting normalized coordinates to absolute coordinates.
Returns:
batch_indices: an integer tensor of shape [num_boxes, 3] holding the
indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively.
batch_box_height_width: a float tensor of shape [num_boxes, 2] holding
expected height and width of each box in the output space.
batch_offsets: a float tensor of shape [num_boxes, 2] holding the
expected y and x offset of each box in the output space.
batch_weights: a float tensor of shape [num_boxes] indicating the
weight of each prediction.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_box_height_width = []
batch_weights = []
batch_offsets = []
for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1),
maximum_normalized_coordinate=maximum_normalized_coordinate)
      # Get the box center coordinates. Each returned tensor has shape
      # [num_boxes].
(y_center, x_center, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_boxes, 2]
# indices: [num_boxes, 2]
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_box_height_width.append(
tf.stack([boxes_height, boxes_width], axis=1))
batch_weights.append(weights)
batch_offsets.append(offsets)
batch_indices = tf.concat(batch_indices, axis=0)
batch_box_height_width = tf.concat(batch_box_height_width, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_box_height_width, batch_offsets, batch_weights)
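# Illustrative usage sketch, added for documentation only; it is not part of
# the library API. The numbers below (stride 4, a 128x128 input, two boxes)
# are arbitrary assumptions.
def _example_box_target_assignment():
  """Sketch: assigns box size/offset targets for a single-image batch."""
  assigner = CenterNetBoxTargetAssigner(stride=4)
  gt_boxes = tf.constant(
      [[0.0, 0.0, 0.5, 0.5],
       [0.25, 0.25, 1.0, 1.0]], dtype=tf.float32)
  # Returned shapes: indices [2, 3] (batch, y, x), height/width [2, 2],
  # offsets [2, 2], weights [2].
  return assigner.assign_size_and_offset_targets(
      height=128, width=128, gt_boxes_list=[gt_boxes])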
# TODO(yuhuic): Update this class to handle the instance/keypoint weights.
# Currently those weights are used as "mask" to indicate whether an
# instance/keypoint should be considered or not (expecting only either 0 or 1
# value). In reality, the weights can be any value and this class should handle
# those values properly.
class CenterNetKeypointTargetAssigner(object):
"""Wrapper to compute target tensors for the CenterNet keypoint estimation.
This class has methods that take as input a batch of groundtruth tensors
(in the form of a list) and returns the targets required to train the
CenterNet model for keypoint estimation. Specifically, the class methods
expect the groundtruth in the following formats (consistent with the
standard Object Detection API). Note that usually the groundtruth tensors are
packed with a list which represents the batch dimension:
gt_classes_list: [Required] a list of 2D tf.float32 one-hot
(or k-hot) tensors of shape [num_instances, num_classes] containing the
class targets with the 0th index assumed to map to the first non-background
class.
gt_keypoints_list: [Required] a list of 3D tf.float32 tensors of
shape [num_instances, num_total_keypoints, 2] containing keypoint
coordinates. Note that the "num_total_keypoints" should be the sum of the
num_keypoints over all possible keypoint types, e.g. human pose, face.
For example, if a dataset contains both 17 human pose keypoints and 5 face
keypoints, then num_total_keypoints = 17 + 5 = 22.
    If an instance contains only a subset of keypoints (e.g. human pose
    keypoints but not face keypoints), the face keypoints will be filled with
    zeros.
Also note that keypoints are assumed to be provided in normalized
coordinates and missing keypoints should be encoded as NaN.
  gt_keypoints_weights_list: [Optional] a list of 2D tf.float32 tensors of
    shape [num_instances, num_total_keypoints] representing the weight of each
    keypoint. If not provided, all non-NaN keypoints will be weighted equally.
gt_boxes_list: [Optional] a list of 2D tf.float32 tensors of shape
[num_instances, 4] containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and
assumed to be normalized and clipped relative to the image window with
y_min <= y_max and x_min <= x_max.
Note that the boxes are only used to compute the center targets but are not
considered as required output of the keypoint task. If the boxes were not
provided, the center targets will be inferred from the keypoints
[not implemented yet].
gt_weights_list: [Optional] A list of 1D tf.float32 tensors of shape
[num_instances] containing weights for groundtruth boxes. Only useful when
gt_boxes_list is also provided.
"""
def __init__(self,
stride,
class_id,
keypoint_indices,
keypoint_std_dev=None,
per_keypoint_offset=False,
peak_radius=0,
compute_heatmap_sparse=False,
per_keypoint_depth=False):
"""Initializes a CenterNet keypoints target assigner.
Args:
stride: int, the stride of the network in output pixels.
class_id: int, the ID of the class (0-indexed) that contains the target
keypoints to consider in this task. For example, if the task is human
pose estimation, the class id should correspond to the "human" class.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
      keypoint_std_dev: A list of floats representing the standard deviations
        of the Gaussian kernels used to generate the keypoint heatmaps (in
        output pixels). This provides the flexibility of using a different
        Gaussian kernel size for each keypoint type. If not provided, all
        standard deviations default to 10.0 in the output pixel space. If
        provided, the length of keypoint_std_dev must equal the length of
        keypoint_indices, giving the standard deviation for each keypoint
        type.
per_keypoint_offset: boolean, indicating whether to assign offset for
each keypoint channel. If set False, the output offset target will have
the shape [batch_size, out_height, out_width, 2]. If set True, the
output offset target will have the shape [batch_size, out_height,
out_width, 2 * num_keypoints].
peak_radius: int, the radius (in the unit of output pixel) around heatmap
peak to assign the offset targets.
compute_heatmap_sparse: bool, indicating whether or not to use the sparse
version of the Op that computes the heatmap. The sparse version scales
better with number of keypoint types, but in some cases is known to
cause an OOM error. See (b/170989061).
      per_keypoint_depth: A bool indicating whether the model predicts the
        depth of each keypoint in independent channels. Similar to
        per_keypoint_offset but for the keypoint depth.
"""
self._stride = stride
self._class_id = class_id
self._keypoint_indices = keypoint_indices
self._per_keypoint_offset = per_keypoint_offset
self._per_keypoint_depth = per_keypoint_depth
self._peak_radius = peak_radius
self._compute_heatmap_sparse = compute_heatmap_sparse
if keypoint_std_dev is None:
self._keypoint_std_dev = ([_DEFAULT_KEYPOINT_OFFSET_STD_DEV] *
len(keypoint_indices))
else:
assert len(keypoint_indices) == len(keypoint_std_dev)
self._keypoint_std_dev = keypoint_std_dev
def assign_keypoint_heatmap_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoints_weights_list=None,
gt_weights_list=None,
gt_boxes_list=None):
"""Returns the keypoint heatmap targets for the CenterNet model.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
gt_keypoints_list: A list of float tensors with shape [num_instances,
num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of float tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See
class-level description for more detail. If provided, the keypoint
standard deviations will be scaled based on the box sizes.
Returns:
heatmap: A float tensor of shape [batch_size, output_height, output_width,
num_keypoints] representing the per keypoint type center heatmap.
output_height and output_width are computed by dividing the input height
and width by the stride specified during initialization. Note that the
"num_keypoints" is defined by the length of keypoint_indices, which is
not necessarily equal to "num_total_keypoints".
num_instances_batch: A 2D int tensor of shape
[batch_size, num_keypoints] representing number of instances for each
keypoint type.
      valid_mask: A float tensor with shape [batch_size, output_height,
        output_width, num_keypoints] whose values are 0.0 inside the regions
        of the blackout boxes and 1.0 elsewhere. Note that the blackout boxes
        are per keypoint type and are applied when the keypoint
        visibility/weight (of the corresponding keypoint type) is zero.
"""
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
y_grid, x_grid = ta_utils.image_shape_to_grids(out_height, out_width)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_boxes_list is None:
gt_boxes_list = [None] * len(gt_keypoints_list)
heatmaps = []
num_instances_list = []
valid_mask_list = []
for keypoints, classes, kp_weights, weights, boxes in zip(
gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list, gt_boxes_list):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=out_height,
out_width=out_width,
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# A tensor of shape [num_instances, num_keypoints] with
# each element representing the type dimension for each corresponding
# keypoint:
# [[0, 1, ..., k-1],
# [0, 1, ..., k-1],
# :
# [0, 1, ..., k-1]]
keypoint_types = tf.tile(
input=tf.expand_dims(tf.range(num_keypoints), axis=0),
multiples=[num_instances, 1])
# A tensor of shape [num_instances, num_keypoints] with
# each element representing the sigma of the Gaussian kernel for each
# keypoint.
keypoint_std_dev = tf.tile(
input=tf.expand_dims(tf.constant(self._keypoint_std_dev), axis=0),
multiples=[num_instances, 1])
# If boxes is not None, then scale the standard deviation based on the
# size of the object bounding boxes similar to object center heatmap.
if boxes is not None:
boxes = box_list.BoxList(boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
        # Get the box height and width. Each returned tensor has shape
        # [num_instances].
(_, _, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
# Compute the sigma from box size. The tensor shape: [num_instances].
sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, 0.7)
keypoint_std_dev = keypoint_std_dev * tf.stack(
[sigma] * num_keypoints, axis=1)
# Generate the per-keypoint type valid region mask to ignore regions
# with keypoint weights equal to zeros (e.g. visibility is 0).
# shape of valid_mask: [out_height, out_width, num_keypoints]
kp_weight_list = tf.unstack(kp_weights, axis=1)
valid_mask_channel_list = []
for kp_weight in kp_weight_list:
blackout = kp_weight < 1e-3
valid_mask_channel_list.append(
ta_utils.blackout_pixel_weights_by_box_regions(
out_height, out_width, boxes.get(), blackout))
valid_mask = tf.stack(valid_mask_channel_list, axis=2)
valid_mask_list.append(valid_mask)
# Apply the Gaussian kernel to the keypoint coordinates. Returned heatmap
# has shape of [out_height, out_width, num_keypoints].
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]),
x_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]),
sigma=tf.keras.backend.flatten(keypoint_std_dev),
channel_onehot=tf.one_hot(
tf.keras.backend.flatten(keypoint_types), depth=num_keypoints),
channel_weights=tf.keras.backend.flatten(kp_weights))
num_instances_list.append(
tf.cast(tf.reduce_sum(kp_weights, axis=0), dtype=tf.int32))
heatmaps.append(heatmap)
return (tf.stack(heatmaps, axis=0), tf.stack(num_instances_list, axis=0),
tf.stack(valid_mask_list, axis=0))
def _get_keypoint_types(self, num_instances, num_keypoints, num_neighbors):
"""Gets keypoint type index tensor.
    The function prepares a tensor of keypoint type indices with shape
    [num_instances, num_keypoints, num_neighbors]. Each element holds the
    keypoint type index of the corresponding keypoint, tiled along the 3rd
    axis:
[[0, 1, ..., num_keypoints - 1],
[0, 1, ..., num_keypoints - 1],
:
[0, 1, ..., num_keypoints - 1]]
Args:
num_instances: int, the number of instances, used to define the 1st
dimension.
num_keypoints: int, the number of keypoint types, used to define the 2nd
dimension.
num_neighbors: int, the number of neighborhood pixels to consider for each
keypoint, used to define the 3rd dimension.
Returns:
      An integer tensor of shape [num_instances, num_keypoints, num_neighbors].
"""
keypoint_types = tf.range(num_keypoints)[tf.newaxis, :, tf.newaxis]
tiled_keypoint_types = tf.tile(keypoint_types,
multiples=[num_instances, 1, num_neighbors])
return tiled_keypoint_types
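  # For example (illustrative values), _get_keypoint_types(num_instances=2,
  # num_keypoints=3, num_neighbors=1) returns a [2, 3, 1] tensor:
  #   [[[0], [1], [2]],
  #    [[0], [1], [2]]]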
def assign_keypoints_offset_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the offsets and indices of the keypoints for location refinement.
The returned values are used to refine the location of each keypoints in the
heatmap. The predicted values at the relevant indices can be retrieved with
the get_batch_predictions_from_indices function.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
      gt_keypoints_list: A list of tensors with shape [num_instances,
        num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
batch_indices: an integer tensor of shape [num_total_instances, 3] (or
[num_total_instances, 4] if 'per_keypoint_offset' is set True) holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively. The fourth column corresponds to the channel
dimension (if 'per_keypoint_offset' is set True).
batch_offsets: a float tensor of shape [num_total_instances, 2] holding
the expected y and x offset of each box in the output space.
batch_weights: a float tensor of shape [num_total_instances] indicating
the weight of each prediction.
Note that num_total_instances = batch_size * num_instances *
num_keypoints * num_neighbors
"""
batch_indices = []
batch_offsets = []
batch_weights = []
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, kp_weights, weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# [num_instances * num_keypoints]
y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0])
x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1])
# All keypoint coordinates and their neighbors:
# [num_instance * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
y_source, x_source,
self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
# Update the valid keypoint weights.
# [num_instance * num_keypoints, num_neighbors]
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_instances * num_keypoints, num_neighbors, 2]
# indices: [num_instances * num_keypoints, num_neighbors, 2]
offsets, indices = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=y_source,
x_target=x_source)
# Reshape to:
# offsets: [num_instances * num_keypoints * num_neighbors, 2]
# indices: [num_instances * num_keypoints * num_neighbors, 2]
offsets = tf.reshape(offsets, [-1, 2])
indices = tf.reshape(indices, [-1, 2])
# Prepare the batch indices to be prepended.
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
if self._per_keypoint_offset:
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
else:
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_offsets.append(offsets)
batch_weights.append(tf.keras.backend.flatten(valid_keypoints))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or
# [batch_size * num_instances * num_keypoints * num_neighbors, 4] if
# 'per_keypoint_offset' is set to True.
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 2]
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_offsets, batch_weights)
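  # Illustrative usage sketch (assumed values, comments only):
  #
  #   kp_assigner = CenterNetKeypointTargetAssigner(
  #       stride=4, class_id=0, keypoint_indices=[0, 1, 2])
  #   keypoints = tf.constant(
  #       [[[0.1, 0.2], [0.3, 0.4], [float('nan'), float('nan')]]],
  #       tf.float32)
  #   classes = tf.one_hot([0], depth=2)
  #   indices, offsets, weights = kp_assigner.assign_keypoints_offset_targets(
  #       height=128, width=128, gt_keypoints_list=[keypoints],
  #       gt_classes_list=[classes])
  #
  # The NaN keypoint receives zero weight and therefore does not contribute
  # to the offset loss.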
def assign_keypoints_depth_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoint_depths_list,
gt_keypoint_depth_weights_list,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the target depths of the keypoints.
The returned values are the relative depth information of each keypoints.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
gt_keypoints_list: A list of tensors with shape [num_instances,
num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoint_depths_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the relative depth of the
keypoints.
gt_keypoint_depth_weights_list: A list of tensors with shape
[num_instances, num_total_keypoints] corresponding to the weights of
the relative depth.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
batch_indices: an integer tensor of shape [num_total_instances, 3] (or
[num_total_instances, 4] if 'per_keypoint_depth' is set True) holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
        dimensions respectively. The fourth column corresponds to the channel
        dimension (if 'per_keypoint_depth' is set True).
batch_depths: a float tensor of shape [num_total_instances, 1] (or
[num_total_instances, num_keypoints] if per_keypoint_depth is set True)
indicating the target depth of each keypoint.
batch_weights: a float tensor of shape [num_total_instances] indicating
the weight of each prediction.
Note that num_total_instances = batch_size * num_instances *
num_keypoints * num_neighbors
"""
batch_indices = []
batch_weights = []
batch_depths = []
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_keypoint_depths_list is None:
gt_keypoint_depths_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, kp_weights, weights,
keypoint_depths, keypoint_depth_weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list,
gt_keypoints_weights_list, gt_weights_list,
gt_keypoint_depths_list, gt_keypoint_depth_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# [num_instances * num_keypoints]
y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0])
x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1])
# All keypoint coordinates and their neighbors:
# [num_instance * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
y_source, x_source,
self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
# Update the valid keypoint weights.
# [num_instance * num_keypoints, num_neighbors]
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
# Compute the offsets and indices of the box centers. Shape:
# indices: [num_instances * num_keypoints, num_neighbors, 2]
_, indices = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=y_source,
x_target=x_source)
# Reshape to:
# indices: [num_instances * num_keypoints * num_neighbors, 2]
indices = tf.reshape(indices, [-1, 2])
# Gather the keypoint depth from corresponding keypoint indices:
# [num_instances, num_keypoints]
keypoint_depths = tf.gather(
keypoint_depths, self._keypoint_indices, axis=1)
# Tile the depth target to surrounding pixels.
# [num_instances, num_keypoints, num_neighbors]
tiled_keypoint_depths = tf.tile(
tf.expand_dims(keypoint_depths, axis=-1),
multiples=[1, 1, num_neighbors])
# [num_instances, num_keypoints]
keypoint_depth_weights = tf.gather(
keypoint_depth_weights, self._keypoint_indices, axis=1)
# [num_instances, num_keypoints, num_neighbors]
keypoint_depth_weights = tf.tile(
tf.expand_dims(keypoint_depth_weights, axis=-1),
multiples=[1, 1, num_neighbors])
# Update the weights of keypoint depth by the weights of the keypoints.
# A keypoint depth target is valid only if its corresponding keypoint
# target is also valid.
# [num_instances, num_keypoints, num_neighbors]
tiled_depth_weights = (
tf.reshape(valid_keypoints,
[num_instances, num_keypoints, num_neighbors]) *
keypoint_depth_weights)
invalid_depths = tf.logical_or(
tf.math.is_nan(tiled_depth_weights),
tf.math.is_nan(tiled_keypoint_depths))
# Assign zero values and weights to NaN values.
final_keypoint_depths = tf.where(invalid_depths,
tf.zeros_like(tiled_keypoint_depths),
tiled_keypoint_depths)
final_keypoint_depth_weights = tf.where(
invalid_depths,
tf.zeros_like(tiled_depth_weights),
tiled_depth_weights)
# [num_instances * num_keypoints * num_neighbors, 1]
batch_depths.append(tf.reshape(final_keypoint_depths, [-1, 1]))
# Prepare the batch indices to be prepended.
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
if self._per_keypoint_depth:
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
else:
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(
tf.keras.backend.flatten(final_keypoint_depth_weights))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or
# [batch_size * num_instances * num_keypoints * num_neighbors, 4] if
    # 'per_keypoint_depth' is set to True.
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 1]
batch_depths = tf.concat(batch_depths, axis=0)
return (batch_indices, batch_depths, batch_weights)
def assign_joint_regression_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_boxes_list=None,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the joint regression from center grid to keypoints.
The joint regression is used as the grouping cue from the estimated
keypoints to instance center. The offsets are the vectors from the floored
object center coordinates to the keypoint coordinates.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
      gt_keypoints_list: A list of float tensors with shape [num_instances,
        num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of float tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See
class-level description for more detail. If provided, then the center
targets will be computed based on the center of the boxes.
      gt_keypoints_weights_list: A list of float tensors with shape
        [num_instances, num_total_keypoints] representing the weight of each
        keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
      batch_indices: an integer tensor of shape [num_total_instances, 4]
        holding the indices inside the predicted tensor which should be
        penalized. The first column indicates the index along the batch
        dimension and the second and third columns indicate the index along
        the y and x dimensions respectively; the last column refers to the
        keypoint type dimension.
      batch_offsets: a float tensor of shape [num_total_instances, 2] holding
        the expected y and x offsets from the object center to each keypoint
        in the output space.
      batch_weights: a float tensor of shape [num_total_instances] indicating
        the weight of each prediction.
      Note that num_total_instances = batch_size * num_instances *
        num_keypoints * num_neighbors.
Raises:
NotImplementedError: currently the object center coordinates need to be
computed from groundtruth bounding boxes. The functionality of
generating the object center coordinates from keypoints is not
implemented yet.
"""
batch_indices = []
batch_offsets = []
batch_weights = []
batch_size = len(gt_keypoints_list)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * batch_size
if gt_boxes_list is None:
gt_boxes_list = [None] * batch_size
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, boxes, kp_weights, weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list,
gt_boxes_list, gt_keypoints_weights_list, gt_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# If boxes are provided, compute the joint center from it.
if boxes is not None:
# Compute joint center from boxes.
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
y_center, x_center, _, _ = boxes.get_center_coordinates_and_sizes()
else:
# TODO(yuhuic): Add the logic to generate object centers from keypoints.
raise NotImplementedError((
'The functionality of generating object centers from keypoints is'
' not implemented yet. Please provide groundtruth bounding boxes.'
))
# Tile the yx center coordinates to be the same shape as keypoints.
y_center_tiled = tf.tile(
tf.reshape(y_center, shape=[num_instances, 1]),
multiples=[1, num_keypoints])
x_center_tiled = tf.tile(
tf.reshape(x_center, shape=[num_instances, 1]),
multiples=[1, num_keypoints])
# [num_instance * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
tf.keras.backend.flatten(y_center_tiled),
tf.keras.backend.flatten(x_center_tiled), self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_instances * num_keypoints, 2]
# indices: [num_instances * num_keypoints, 2]
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]),
x_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]))
# Reshape to:
# offsets: [num_instances * num_keypoints * num_neighbors, 2]
# indices: [num_instances * num_keypoints * num_neighbors, 2]
offsets = tf.reshape(offsets, [-1, 2])
indices = tf.reshape(indices, [-1, 2])
# keypoint type tensor: [num_instances, num_keypoints, num_neighbors].
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
batch_offsets.append(offsets)
batch_weights.append(tf.keras.backend.flatten(valid_keypoints))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints, 4]
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints, 2]
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_offsets, batch_weights)
def _resize_masks(masks, height, width, method):
# Resize segmentation masks to conform to output dimensions. Use TF2
# image resize because TF1's version is buggy:
# https://yaqs.corp.google.com/eng/q/4970450458378240
masks = tf2.image.resize(
masks[:, :, :, tf.newaxis],
size=(height, width),
method=method)
return masks[:, :, :, 0]
class CenterNetMaskTargetAssigner(object):
"""Wrapper to compute targets for segmentation masks."""
def __init__(self, stride, boxes_scale=1.0):
"""Constructor.
Args:
stride: The stride of the network. Targets are assigned at the output
stride.
boxes_scale: Scale to apply to boxes before producing mask weights. This
is meant to ensure the full object region is properly weighted prior to
applying loss. A value of ~1.05 is typically applied when object regions
should be blacked out (perhaps because valid groundtruth masks are not
present).
"""
self._stride = stride
self._boxes_scale = boxes_scale
def assign_segmentation_targets(
self, gt_masks_list, gt_classes_list, gt_boxes_list=None,
gt_mask_weights_list=None, mask_resize_method=ResizeMethod.BILINEAR):
"""Computes the segmentation targets.
This utility produces a semantic segmentation mask for each class, starting
with whole image instance segmentation masks. Effectively, each per-class
segmentation target is the union of all masks from that class.
Args:
gt_masks_list: A list of float tensors with shape [num_boxes,
input_height, input_width] with values in {0, 1} representing instance
masks for each object.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
gt_boxes_list: An optional list of float tensors with shape [num_boxes, 4]
with normalized boxes corresponding to each mask. The boxes are used to
spatially allocate mask weights.
gt_mask_weights_list: An optional list of float tensors with shape
[num_boxes] with weights for each mask. If a mask has a zero weight, it
indicates that the box region associated with the mask should not
contribute to the loss. If not provided, will use a per-pixel weight of
1.
mask_resize_method: A `tf.compat.v2.image.ResizeMethod`. The method to use
when resizing masks from input resolution to output resolution.
Returns:
segmentation_targets: An int32 tensor of size [batch_size, output_height,
output_width, num_classes] representing the class of each location in
the output space.
segmentation_weight: A float32 tensor of size [batch_size, output_height,
output_width] indicating the loss weight to apply at each location.
"""
_, num_classes = shape_utils.combined_static_and_dynamic_shape(
gt_classes_list[0])
_, input_height, input_width = (
shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0]))
output_height = tf.maximum(input_height // self._stride, 1)
output_width = tf.maximum(input_width // self._stride, 1)
if gt_boxes_list is None:
gt_boxes_list = [None] * len(gt_masks_list)
if gt_mask_weights_list is None:
gt_mask_weights_list = [None] * len(gt_masks_list)
segmentation_targets_list = []
segmentation_weights_list = []
for gt_boxes, gt_masks, gt_mask_weights, gt_classes in zip(
gt_boxes_list, gt_masks_list, gt_mask_weights_list, gt_classes_list):
if gt_boxes is not None and gt_mask_weights is not None:
boxes = box_list.BoxList(gt_boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes_absolute = box_list_ops.to_absolute_coordinates(
boxes, output_height, output_width)
# Generate a segmentation weight that applies mask weights in object
# regions.
blackout = gt_mask_weights <= 0
segmentation_weight_for_image = (
ta_utils.blackout_pixel_weights_by_box_regions(
output_height, output_width, boxes_absolute.get(), blackout,
weights=gt_mask_weights, boxes_scale=self._boxes_scale))
segmentation_weights_list.append(segmentation_weight_for_image)
else:
segmentation_weights_list.append(tf.ones((output_height, output_width),
dtype=tf.float32))
gt_masks = _resize_masks(gt_masks, output_height, output_width,
mask_resize_method)
gt_masks = gt_masks[:, :, :, tf.newaxis]
gt_classes_reshaped = tf.reshape(gt_classes, [-1, 1, 1, num_classes])
# Shape: [h, w, num_classes].
segmentations_for_image = tf.reduce_max(
gt_masks * gt_classes_reshaped, axis=0)
# Avoid the case where max of an empty array is -inf.
segmentations_for_image = tf.maximum(segmentations_for_image, 0.0)
segmentation_targets_list.append(segmentations_for_image)
segmentation_target = tf.stack(segmentation_targets_list, axis=0)
segmentation_weight = tf.stack(segmentation_weights_list, axis=0)
return segmentation_target, segmentation_weight
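# Illustrative usage sketch, added for documentation only; it is not part of
# the library API. The numbers below (stride 4, 64x64 masks, 3 classes) are
# arbitrary assumptions.
def _example_segmentation_target_assignment():
  """Sketch: builds per-class semantic segmentation targets from masks."""
  mask_assigner = CenterNetMaskTargetAssigner(stride=4)
  gt_masks = tf.ones([2, 64, 64], dtype=tf.float32)  # Two instance masks.
  gt_classes = tf.one_hot([0, 2], depth=3)  # One-hot class labels.
  # Returned shapes: segmentation_targets [1, 16, 16, 3],
  # segmentation_weights [1, 16, 16].
  return mask_assigner.assign_segmentation_targets(
      gt_masks_list=[gt_masks], gt_classes_list=[gt_classes])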
class CenterNetDensePoseTargetAssigner(object):
"""Wrapper to compute targets for DensePose task."""
def __init__(self, stride, num_parts=24):
self._stride = stride
self._num_parts = num_parts
def assign_part_and_coordinate_targets(self,
height,
width,
gt_dp_num_points_list,
gt_dp_part_ids_list,
gt_dp_surface_coords_list,
gt_weights_list=None):
"""Returns the DensePose part_id and coordinate targets and their indices.
The returned values are expected to be used with predicted tensors
of size (batch_size, height//self._stride, width//self._stride, 2). The
predicted values at the relevant indices can be retrieved with the
get_batch_predictions_from_indices function.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_dp_num_points_list: a list of 1-D tf.int32 tensors of shape [num_boxes]
containing the number of DensePose sampled points per box.
gt_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape
[num_boxes, max_sampled_points] containing the DensePose part ids
(0-indexed) for each sampled point. Note that there may be padding, as
boxes may contain a different number of sampled points.
gt_dp_surface_coords_list: a list of 3-D tf.float32 tensors of shape
[num_boxes, max_sampled_points, 4] containing the DensePose surface
coordinates (normalized) for each sampled point. Note that there may be
padding.
gt_weights_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [num_total_points, 4] holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively. The fourth column is the part index.
batch_part_ids: an int tensor of shape [num_total_points, num_parts]
holding 1-hot encodings of parts for each sampled point.
batch_surface_coords: a float tensor of shape [num_total_points, 2]
holding the expected (v, u) coordinates for each sampled point.
batch_weights: a float tensor of shape [num_total_points] indicating the
weight of each prediction.
Note that num_total_points = batch_size * num_boxes * max_sampled_points.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_dp_num_points_list)
batch_indices = []
batch_part_ids = []
batch_surface_coords = []
batch_weights = []
for i, (num_points, part_ids, surface_coords, weights) in enumerate(
zip(gt_dp_num_points_list, gt_dp_part_ids_list,
gt_dp_surface_coords_list, gt_weights_list)):
num_boxes, max_sampled_points = (
shape_utils.combined_static_and_dynamic_shape(part_ids))
part_ids_flattened = tf.reshape(part_ids, [-1])
part_ids_one_hot = tf.one_hot(part_ids_flattened, depth=self._num_parts)
# Get DensePose coordinates in the output space.
surface_coords_abs = densepose_ops.to_absolute_coordinates(
surface_coords,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
surface_coords_abs = tf.reshape(surface_coords_abs, [-1, 4])
# Each tensor has shape [num_boxes * max_sampled_points].
yabs, xabs, v, u = tf.unstack(surface_coords_abs, axis=-1)
# Get the indices (in output space) for the DensePose coordinates. Note
# that if self._stride is larger than 1, this will have the effect of
# reducing spatial resolution of the groundtruth points.
indices_y = tf.cast(yabs, tf.int32)
indices_x = tf.cast(xabs, tf.int32)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Create per-point weights.
weights_per_point = tf.reshape(
tf.tile(weights[:, tf.newaxis], multiples=[1, max_sampled_points]),
shape=[-1])
# Mask out invalid (i.e. padded) DensePose points.
num_points_tiled = tf.tile(num_points[:, tf.newaxis],
multiples=[1, max_sampled_points])
range_tiled = tf.tile(tf.range(max_sampled_points)[tf.newaxis, :],
multiples=[num_boxes, 1])
valid_points = tf.math.less(range_tiled, num_points_tiled)
valid_points = tf.cast(tf.reshape(valid_points, [-1]), dtype=tf.float32)
weights_per_point = weights_per_point * valid_points
# Shape of [num_boxes * max_sampled_points] integer tensor filled with
# current batch index.
batch_index = i * tf.ones_like(indices_y, dtype=tf.int32)
batch_indices.append(
tf.stack([batch_index, indices_y, indices_x, part_ids_flattened],
axis=1))
batch_part_ids.append(part_ids_one_hot)
batch_surface_coords.append(tf.stack([v, u], axis=1))
batch_weights.append(weights_per_point)
batch_indices = tf.concat(batch_indices, axis=0)
batch_part_ids = tf.concat(batch_part_ids, axis=0)
batch_surface_coords = tf.concat(batch_surface_coords, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
return batch_indices, batch_part_ids, batch_surface_coords, batch_weights
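# Illustrative usage sketch, added for documentation only; it is not part of
# the library API. The sampled points, part ids and surface coordinates below
# are arbitrary assumptions; the third point is padding and gets zero weight.
def _example_densepose_target_assignment():
  """Sketch: assigns DensePose part and surface-coordinate targets."""
  dp_assigner = CenterNetDensePoseTargetAssigner(stride=4, num_parts=24)
  num_points = tf.constant([2], dtype=tf.int32)  # [num_boxes]
  part_ids = tf.constant([[3, 17, 0]], dtype=tf.int32)  # [num_boxes, 3]
  surface_coords = tf.constant(
      [[[0.1, 0.2, 0.3, 0.4],
        [0.5, 0.6, 0.7, 0.8],
        [0.0, 0.0, 0.0, 0.0]]], dtype=tf.float32)  # [num_boxes, 3, 4]
  # Returned shapes: indices [3, 4], part ids [3, 24], surface coords [3, 2],
  # weights [3].
  return dp_assigner.assign_part_and_coordinate_targets(
      height=128, width=128,
      gt_dp_num_points_list=[num_points],
      gt_dp_part_ids_list=[part_ids],
      gt_dp_surface_coords_list=[surface_coords])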
class CenterNetTrackTargetAssigner(object):
"""Wrapper to compute targets for tracking task.
Reference paper: A Simple Baseline for Multi-Object Tracking [1]
[1]: https://arxiv.org/abs/2004.01888
"""
def __init__(self, stride, num_track_ids):
self._stride = stride
self._num_track_ids = num_track_ids
def assign_track_targets(self,
height,
width,
gt_track_ids_list,
gt_boxes_list,
gt_weights_list=None):
"""Computes the track ID targets.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_track_ids_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the track ID of each groundtruth detection box.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_weights_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [batch_size, num_boxes, 3]
holding the indices inside the predicted tensor which should be
penalized. The first column indicates the index along the batch
dimension and the second and third columns indicate the index
along the y and x dimensions respectively.
batch_weights: a float tensor of shape [batch_size, num_boxes] indicating
the weight of each prediction.
track_id_targets: An int32 tensor of size [batch_size, num_boxes,
num_track_ids] containing the one-hot track ID vector of each
groundtruth detection box.
"""
track_id_targets = tf.one_hot(
gt_track_ids_list, depth=self._num_track_ids, axis=-1)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_weights = []
for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
      # Get the box center coordinates. Each returned tensor has shape
      # [num_boxes].
(y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the indices of the box centers. Shape:
# indices: [num_boxes, 2]
(_, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(weights)
batch_indices = tf.stack(batch_indices, axis=0)
batch_weights = tf.stack(batch_weights, axis=0)
return batch_indices, batch_weights, track_id_targets
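# Illustrative usage sketch, added for documentation only; it is not part of
# the library API. The track IDs, boxes and sizes are arbitrary assumptions.
def _example_track_target_assignment():
  """Sketch: assigns tracking (track ID) targets at box centers."""
  track_assigner = CenterNetTrackTargetAssigner(stride=4, num_track_ids=100)
  gt_track_ids = tf.constant([5, 42], dtype=tf.int32)
  gt_boxes = tf.constant(
      [[0.0, 0.0, 0.5, 0.5],
       [0.25, 0.25, 1.0, 1.0]], dtype=tf.float32)
  # Returned shapes: batch_indices [1, 2, 3], batch_weights [1, 2],
  # track_id_targets [1, 2, 100].
  return track_assigner.assign_track_targets(
      height=128, width=128,
      gt_track_ids_list=[gt_track_ids], gt_boxes_list=[gt_boxes])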
def filter_mask_overlap_min_area(masks):
"""If a pixel belongs to 2 instances, remove it from the larger instance."""
num_instances = tf.shape(masks)[0]
def _filter_min_area():
"""Helper function to filter non empty masks."""
areas = tf.reduce_sum(masks, axis=[1, 2], keepdims=True)
per_pixel_area = masks * areas
# Make sure background is ignored in argmin.
per_pixel_area = (masks * per_pixel_area +
(1 - masks) * per_pixel_area.dtype.max)
min_index = tf.cast(tf.argmin(per_pixel_area, axis=0), tf.int32)
filtered_masks = (
tf.range(num_instances)[:, tf.newaxis, tf.newaxis]
==
min_index[tf.newaxis, :, :]
)
return tf.cast(filtered_masks, tf.float32) * masks
return tf.cond(num_instances > 0, _filter_min_area,
lambda: masks)
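# Illustrative example, added for documentation only; it is not part of the
# library API. Two overlapping 2x2 masks: the shared pixel is kept only in
# the smaller (single-pixel) instance.
def _example_filter_mask_overlap():
  """Sketch: resolves overlapping instance masks by minimum area."""
  masks = tf.constant(
      [[[1., 1.],
        [1., 1.]],
       [[1., 0.],
        [0., 0.]]], dtype=tf.float32)
  # Returns [[[0., 1.], [1., 1.]],
  #          [[1., 0.], [0., 0.]]].
  return filter_mask_overlap_min_area(masks)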
def filter_mask_overlap(masks, method='min_area'):
if method == 'min_area':
return filter_mask_overlap_min_area(masks)
else:
raise ValueError('Unknown mask overlap filter type - {}'.format(method))
class CenterNetCornerOffsetTargetAssigner(object):
"""Wrapper to compute corner offsets for boxes using masks."""
def __init__(self, stride, overlap_resolution='min_area'):
"""Initializes the corner offset target assigner.
Args:
stride: int, the stride of the network in output pixels.
overlap_resolution: string, specifies how we handle overlapping
instance masks. Currently only 'min_area' is supported which assigns
overlapping pixels to the instance with the minimum area.
"""
self._stride = stride
self._overlap_resolution = overlap_resolution
def assign_corner_offset_targets(
self, gt_boxes_list, gt_masks_list):
"""Computes the corner offset targets and foreground map.
For each pixel that is part of any object's foreground, this function
computes the relative offsets to the top-left and bottom-right corners of
that instance's bounding box. It also returns a foreground map to indicate
which pixels contain valid corner offsets.
Args:
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_masks_list: A list of float tensors with shape [num_boxes,
input_height, input_width] with values in {0, 1} representing instance
masks for each object.
Returns:
      corner_offsets: A float tensor of shape [batch_size, height, width, 4]
        containing, in order, the (y, x) offsets to the top-left corner and
        the (y, x) offsets to the bottom-right corner for each foreground
        pixel.
foreground: A float tensor of shape [batch_size, height, width] in which
each pixel is set to 1 if it is a part of any instance's foreground
(and thus contains valid corner offsets) and 0 otherwise.
"""
_, input_height, input_width = (
shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0]))
output_height = tf.maximum(input_height // self._stride, 1)
output_width = tf.maximum(input_width // self._stride, 1)
y_grid, x_grid = tf.meshgrid(
tf.range(output_height), tf.range(output_width),
indexing='ij')
y_grid, x_grid = tf.cast(y_grid, tf.float32), tf.cast(x_grid, tf.float32)
corner_targets = []
foreground_targets = []
for gt_masks, gt_boxes in zip(gt_masks_list, gt_boxes_list):
gt_masks = _resize_masks(gt_masks, output_height, output_width,
method=ResizeMethod.NEAREST_NEIGHBOR)
gt_masks = filter_mask_overlap(gt_masks, self._overlap_resolution)
output_height = tf.cast(output_height, tf.float32)
output_width = tf.cast(output_width, tf.float32)
ymin, xmin, ymax, xmax = tf.unstack(gt_boxes, axis=1)
ymin, ymax = ymin * output_height, ymax * output_height
xmin, xmax = xmin * output_width, xmax * output_width
top_y = ymin[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis]
left_x = xmin[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis]
bottom_y = ymax[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis]
right_x = xmax[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis]
foreground_target = tf.cast(tf.reduce_sum(gt_masks, axis=0) > 0.5,
tf.float32)
foreground_targets.append(foreground_target)
corner_target = tf.stack([
tf.reduce_sum(top_y * gt_masks, axis=0),
tf.reduce_sum(left_x * gt_masks, axis=0),
tf.reduce_sum(bottom_y * gt_masks, axis=0),
tf.reduce_sum(right_x * gt_masks, axis=0),
], axis=2)
corner_targets.append(corner_target)
return (tf.stack(corner_targets, axis=0),
tf.stack(foreground_targets, axis=0))
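# Illustrative usage sketch, added for documentation only; it is not part of
# the library API. One instance whose mask covers the whole (assumed) 32x32
# input, with stride 4.
def _example_corner_offset_target_assignment():
  """Sketch: assigns per-pixel corner offsets from instance masks."""
  corner_assigner = CenterNetCornerOffsetTargetAssigner(stride=4)
  gt_boxes = tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32)
  gt_masks = tf.ones([1, 32, 32], dtype=tf.float32)
  # Returned shapes: corner_offsets [1, 8, 8, 4], foreground [1, 8, 8].
  return corner_assigner.assign_corner_offset_targets(
      gt_boxes_list=[gt_boxes], gt_masks_list=[gt_masks])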
class CenterNetTemporalOffsetTargetAssigner(object):
"""Wrapper to compute target tensors for the temporal offset task.
This class has methods that take as input a batch of ground truth tensors
(in the form of a list) and returns the targets required to train the
temporal offset task.
"""
def __init__(self, stride):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
"""
self._stride = stride
def assign_temporal_offset_targets(self,
height,
width,
gt_boxes_list,
gt_offsets_list,
gt_match_list,
gt_weights_list=None):
"""Returns the temporal offset targets and their indices.
For each ground truth box, this function assigns it the corresponding
temporal offset to train the model.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_offsets_list: A list of 2-D tf.float32 tensors of shape [num_boxes, 2]
containing the spatial offsets of objects' centers compared with the
previous frame.
gt_match_list: A list of 1-D tf.float32 tensors of shape [num_boxes]
containing flags that indicate if an object has existed in the
previous frame.
gt_weights_list: A list of tensors with shape [num_boxes] corresponding to
the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [num_boxes, 3] holding the
indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively.
batch_temporal_offsets: a float tensor of shape [num_boxes, 2] of the
expected y and x temporal offset of each object center in the
output space.
batch_weights: a float tensor of shape [num_boxes] indicating the
weight of each prediction.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_weights = []
batch_temporal_offsets = []
for i, (boxes, offsets, match_flags, weights) in enumerate(zip(
gt_boxes_list, gt_offsets_list, gt_match_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
      # Get the box center coordinates. Each returned tensor has shape
      # [num_boxes].
(y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_boxes, 2]
# indices: [num_boxes, 2]
(_, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
# if an object is not matched, its weight becomes zero.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
weights *= match_flags
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(weights)
batch_temporal_offsets.append(offsets)
batch_indices = tf.concat(batch_indices, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
batch_temporal_offsets = tf.concat(batch_temporal_offsets, axis=0)
return (batch_indices, batch_temporal_offsets, batch_weights)
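# Illustrative usage sketch (not part of the original library code): toy
# inputs for the temporal offset assigner above. The tensor values are
# placeholders; only the documented shapes ([num_boxes, 4], [num_boxes, 2],
# [num_boxes]) matter here.
def _example_temporal_offset_targets():
  """Runs assign_temporal_offset_targets on a single-image toy batch."""
  assigner = CenterNetTemporalOffsetTargetAssigner(stride=4)
  gt_boxes_list = [tf.constant([[0.1, 0.1, 0.5, 0.5]], tf.float32)]
  gt_offsets_list = [tf.constant([[1.0, -2.0]], tf.float32)]
  gt_match_list = [tf.constant([1.0], tf.float32)]
  return assigner.assign_temporal_offset_targets(
      height=128, width=128, gt_boxes_list=gt_boxes_list,
      gt_offsets_list=gt_offsets_list, gt_match_list=gt_match_list)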
class DETRTargetAssigner(object):
"""Target assigner for DETR (https://arxiv.org/abs/2005.12872).
Detection Transformer (DETR) matches predicted boxes to groundtruth directly
to determine targets instead of matching anchors to groundtruth. Hence, the
new target assigner.
"""
def __init__(self):
"""Construct Object Detection Target Assigner."""
self._similarity_calc = sim_calc.DETRSimilarity()
self._matcher = hungarian_matcher.HungarianBipartiteMatcher()
def batch_assign(self,
pred_box_batch,
gt_box_batch,
pred_class_batch,
gt_class_targets_batch,
gt_weights_batch=None,
unmatched_class_label_batch=None):
"""Batched assignment of classification and regression targets.
Args:
pred_box_batch: a tensor of shape [batch_size, num_queries, 4]
representing predicted bounding boxes.
gt_box_batch: a tensor of shape [batch_size, num_queries, 4]
representing groundtruth bounding boxes.
      pred_class_batch: A list of tensors with length batch_size, where each
        tensor has shape [num_queries, num_classes] to be used by certain
        similarity calculators.
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, num_classes] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
      unmatched_class_label_batch: a list of float32 tensors (one per image)
        with shape [d_1, d_2, ..., d_k], consistent with the classification
        target for each anchor (and can be empty for scalar targets). This
        shape must thus be compatible with `gt_class_targets_batch`.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_pred_boxes,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_pred_boxes,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_pred_boxes,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_pred_boxes].
"""
pred_box_batch = [
box_list.BoxList(pred_box)
for pred_box in tf.unstack(pred_box_batch)]
gt_box_batch = [
box_list.BoxList(gt_box)
for gt_box in tf.unstack(gt_box_batch)]
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
if unmatched_class_label_batch is None:
unmatched_class_label_batch = [None] * len(gt_class_targets_batch)
pred_class_batch = tf.unstack(pred_class_batch)
    for (pred_boxes, gt_boxes, pred_classes, gt_class_targets, gt_weights,
         unmatched_class_label) in zip(pred_box_batch, gt_box_batch,
                                       pred_class_batch, gt_class_targets_batch,
                                       gt_weights_batch,
                                       unmatched_class_label_batch):
      (cls_targets, cls_weights, reg_targets,
       reg_weights) = self.assign(pred_boxes, gt_boxes, pred_classes,
                                  gt_class_targets, gt_weights,
                                  unmatched_class_label)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights)
def assign(self,
pred_boxes,
gt_boxes,
pred_classes,
gt_labels,
gt_weights=None,
unmatched_class_label=None):
"""Assign classification and regression targets to each box_pred.
For a given set of pred_boxes and groundtruth detections, match pred_boxes
to gt_boxes and assign classification and regression targets to
each box_pred as well as weights based on the resulting match (specifying,
e.g., which pred_boxes should not contribute to training loss).
pred_boxes that are not matched to anything are given a classification
    target of `unmatched_class_label`.
Args:
pred_boxes: a BoxList representing N pred_boxes
gt_boxes: a BoxList representing M groundtruth boxes
pred_classes: A tensor with shape [max_num_boxes, num_classes]
to be used by certain similarity calculators.
gt_labels: a tensor of shape [M, num_classes]
with labels for each of the ground_truth boxes. The subshape
[num_classes] can be empty (corresponding to scalar inputs). When set
to None, gt_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
gt_weights: a float tensor of shape [M] indicating the weight to
assign to all pred_boxes match to a particular groundtruth box. The
weights must be in [0., 1.]. If None, all weights are set to 1.
Generally no groundtruth boxes with zero weight match to any pred_boxes
as matchers are aware of groundtruth weights. Additionally,
`cls_weights` and `reg_weights` are calculated using groundtruth
weights as an added safety.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
Returns:
cls_targets: a float32 tensor with shape [num_pred_boxes, num_classes],
where the subshape [num_classes] is compatible with gt_labels
which has shape [num_gt_boxes, num_classes].
cls_weights: a float32 tensor with shape [num_pred_boxes, num_classes],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_pred_boxes,
box_code_dimension]
reg_weights: a float32 tensor with shape [num_pred_boxes]
"""
    if unmatched_class_label is None:
unmatched_class_label = tf.constant(
[1] + [0] * (gt_labels.shape[1] - 1), tf.float32)
if gt_weights is None:
num_gt_boxes = gt_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = gt_boxes.num_boxes()
gt_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
gt_boxes.add_field(fields.BoxListFields.classes, gt_labels)
pred_boxes.add_field(fields.BoxListFields.classes, pred_classes)
match_quality_matrix = self._similarity_calc.compare(
gt_boxes,
pred_boxes)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(gt_weights, 0))
matched_gt_boxes = match.gather_based_on_match(
gt_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
ty, tx, th, tw = matched_gt_boxlist.get_center_coordinates_and_sizes()
reg_targets = tf.transpose(tf.stack([ty, tx, th, tw]))
cls_targets = match.gather_based_on_match(
gt_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
reg_weights = match.gather_based_on_match(
gt_weights,
ignored_value=0.,
unmatched_value=0.)
cls_weights = match.gather_based_on_match(
gt_weights,
ignored_value=0.,
unmatched_value=1)
# convert cls_weights from per-box_pred to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_multiple = tf.concat(
[tf.constant([1]), class_label_shape],
axis=0)
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
return (cls_targets, cls_weights, reg_targets, reg_weights)
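# Illustrative usage sketch (not part of the original library code): a minimal
# call to DETRTargetAssigner.assign with two toy predicted boxes and a single
# groundtruth box over two classes. The numeric values are placeholders; only
# the shapes and the BoxList wrapping follow the contract documented above.
def _example_detr_assignment():
  assigner = DETRTargetAssigner()
  pred_boxes = box_list.BoxList(
      tf.constant([[0.0, 0.0, 0.5, 0.5],
                   [0.5, 0.5, 1.0, 1.0]], tf.float32))
  gt_boxes = box_list.BoxList(
      tf.constant([[0.05, 0.05, 0.45, 0.45]], tf.float32))
  pred_classes = tf.constant([[0.2, 0.8],
                              [0.9, 0.1]], tf.float32)
  gt_labels = tf.constant([[0.0, 1.0]], tf.float32)
  return assigner.assign(pred_boxes, gt_boxes, pred_classes, gt_labels)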
| 128,840 | 45.800218 | 96 | py |
models | models-master/research/object_detection/core/freezable_batch_norm.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A freezable batch norm layer that uses Keras batch normalization."""
import tensorflow.compat.v1 as tf
class FreezableBatchNorm(tf.keras.layers.BatchNormalization):
"""Batch normalization layer (Ioffe and Szegedy, 2014).
This is a `freezable` batch norm layer that supports setting the `training`
parameter in the __init__ method rather than having to set it either via
the Keras learning phase or via the `call` method parameter. This layer will
  forward all other parameters to the default Keras `BatchNormalization`
  layer.
  This class is necessary because Object Detection model training sometimes
  requires batch normalization layers to be `frozen` and used as if it were
  evaluation time, despite still training (and potentially using dropout
  layers).
Like the default Keras BatchNormalization layer, this will normalize the
activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
Args:
training: If False, the layer will normalize using the moving average and
std. dev, without updating the learned avg and std. dev.
If None or True, the layer will follow the keras BatchNormalization layer
strategy of checking the Keras learning phase at `call` time to decide
what to do.
**kwargs: The keyword arguments to forward to the keras BatchNormalization
layer constructor.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
"""
def __init__(self, training=None, **kwargs):
super(FreezableBatchNorm, self).__init__(**kwargs)
self._training = training
def call(self, inputs, training=None):
# Override the call arg only if the batchnorm is frozen. (Ignore None)
if self._training is False: # pylint: disable=g-bool-id-comparison
training = self._training
return super(FreezableBatchNorm, self).call(inputs, training=training)
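# Illustrative usage sketch (not part of the original library code): passing
# training=False at construction time freezes the layer, so it always
# normalizes with the stored moving mean/variance even while the surrounding
# model is being trained. The layer sizes below are arbitrary.
def _example_frozen_batch_norm_model():
  return tf.keras.Sequential([
      tf.keras.layers.Conv2D(8, 3, input_shape=(32, 32, 3)),
      FreezableBatchNorm(training=False),
      tf.keras.layers.ReLU(),
  ])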
| 2,987 | 42.304348 | 80 | py |
models | models-master/research/object_detection/core/model.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract detection model.
This file defines a generic base class for detection models. Programs that are
designed to work with arbitrary detection models should only depend on this
class. We intend for the functions in this class to follow tensor-in/tensor-out
design, thus all functions have tensors or lists/dictionaries holding tensors as
inputs and outputs.
Abstractly, detection models predict output tensors given input images
which can be passed to a loss function at training time or passed to a
postprocessing function at eval time. The computation graphs at a high level
consequently look as follows:
Training time:
inputs (images tensor) -> preprocess -> predict -> loss -> outputs (loss tensor)
Evaluation time:
inputs (images tensor) -> preprocess -> predict -> postprocess
-> outputs (boxes tensor, scores tensor, classes tensor, num_detections tensor)
DetectionModels must thus implement four functions (1) preprocess, (2) predict,
(3) postprocess and (4) loss. DetectionModels should make no assumptions about
the input size or aspect ratio --- they are responsible for doing any
resize/reshaping necessary (see docstring for the preprocess function).
Output classes are always integers in the range [0, num_classes). Any mapping
of these integers to semantic labels is to be handled outside of this class.
Images are resized in the `preprocess` method. All of `preprocess`, `predict`,
and `postprocess` should be reentrant.
The `preprocess` method runs `image_resizer_fn` that returns resized_images and
`true_image_shapes`. Since `image_resizer_fn` can pad the images with zeros,
true_image_shapes indicate the slices that contain the image without padding.
This is useful for padding images to be a fixed size for batching.
The `postprocess` method uses the true image shapes to clip predictions that lie
outside of images.
By default, DetectionModels produce bounding box detections; However, we support
a handful of auxiliary annotations associated with each bounding box, namely,
instance masks and keypoints.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
# If using a new enough version of TensorFlow, detection models should be a
# tf module or keras model for tracking.
try:
_BaseClass = tf.keras.layers.Layer
except AttributeError:
_BaseClass = object
class DetectionModel(six.with_metaclass(abc.ABCMeta, _BaseClass)):
"""Abstract base class for detection models.
  Extends tf.keras.layers.Layer (when available) to guarantee variable
  tracking.
"""
def __init__(self, num_classes):
"""Constructor.
Args:
num_classes: number of classes. Note that num_classes *does not* include
background categories that might be implicitly predicted in various
implementations.
"""
self._num_classes = num_classes
self._groundtruth_lists = {}
self._training_step = None
super(DetectionModel, self).__init__()
@property
def num_classes(self):
return self._num_classes
def groundtruth_lists(self, field):
"""Access list of groundtruth tensors.
Args:
field: a string key, options are
fields.BoxListFields.{boxes,classes,masks,mask_weights,keypoints,
keypoint_visibilities, densepose_*, track_ids,
temporal_offsets, track_match_flags}
fields.InputDataFields.is_annotated.
Returns:
a list of tensors holding groundtruth information (see also
provide_groundtruth function below), with one entry for each image in the
batch.
Raises:
RuntimeError: if the field has not been provided via provide_groundtruth.
"""
if field not in self._groundtruth_lists:
raise RuntimeError('Groundtruth tensor {} has not been provided'.format(
field))
return self._groundtruth_lists[field]
def groundtruth_has_field(self, field):
"""Determines whether the groundtruth includes the given field.
Args:
field: a string key, options are
fields.BoxListFields.{boxes,classes,masks,mask_weights,keypoints,
keypoint_visibilities, densepose_*, track_ids} or
fields.InputDataFields.is_annotated.
Returns:
True if the groundtruth includes the given field, False otherwise.
"""
return field in self._groundtruth_lists
@property
def training_step(self):
if self._training_step is None:
raise ValueError('Training step was not provided to the model.')
return self._training_step
@staticmethod
def get_side_inputs(features):
"""Get side inputs from input features.
This placeholder method provides a way for a meta-architecture to specify
how to grab additional side inputs from input features (in addition to the
image itself) and allows models to depend on contextual information. By
    default, detection models do not use side information (and thus this
    method returns an empty dictionary by default); however, it can be
    overridden if side inputs are necessary.
Args:
features: A dictionary of tensors.
Returns:
An empty dictionary by default.
"""
return {}
@abc.abstractmethod
def preprocess(self, inputs):
"""Input preprocessing.
To be overridden by implementations.
This function is responsible for any scaling/shifting of input values that
is necessary prior to running the detector on an input image.
It is also responsible for any resizing, padding that might be necessary
as images are assumed to arrive in arbitrary sizes. While this function
could conceivably be part of the predict method (below), it is often
convenient to keep these separate --- for example, we may want to preprocess
on one device, place onto a queue, and let another device (e.g., the GPU)
handle prediction.
A few important notes about the preprocess function:
+ We assume that this operation does not have any trainable variables nor
does it affect the groundtruth annotations in any way (thus data
augmentation operations such as random cropping should be performed
externally).
+ There is no assumption that the batchsize in this function is the same as
the batch size in the predict function. In fact, we recommend calling the
preprocess function prior to calling any batching operations (which should
happen outside of the model) and thus assuming that batch sizes are equal
to 1 in the preprocess function.
+ There is also no explicit assumption that the output resolutions
must be fixed across inputs --- this is to support "fully convolutional"
settings in which input images can have different shapes/resolutions.
Args:
inputs: a [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
pass
@abc.abstractmethod
def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs):
"""Predict prediction tensors from inputs tensor.
Outputs of this function can be passed to loss or postprocess functions.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**side_inputs: additional tensors that are required by the network.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
pass
@abc.abstractmethod
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Convert predicted output tensors to final detections.
This stage typically performs a few things such as
* Non-Max Suppression to remove overlapping detection boxes.
* Score conversion and background class removal.
Outputs adhere to the following conventions:
* Classes are integers in [0, num_classes); background classes are removed
and the first non-background class is mapped to 0. If the model produces
class-agnostic detections, then no output is produced for classes.
* Boxes are to be interpreted as being in [y_min, x_min, y_max, x_max]
format and normalized relative to the image window.
* `num_detections` is provided for settings where detections are padded to a
fixed number of boxes.
* We do not specifically assume any kind of probabilistic interpretation
of the scores --- the only important thing is their relative ordering.
Thus implementations of the postprocess function are free to output
logits, probabilities, calibrated probabilities, or anything else.
Args:
prediction_dict: a dictionary holding prediction tensors.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**params: Additional keyword arguments for specific implementations of
DetectionModel.
Returns:
detections: a dictionary containing the following fields
detection_boxes: [batch, max_detections, 4]
detection_scores: [batch, max_detections]
detection_classes: [batch, max_detections]
(If a model is producing class-agnostic detections, this field may be
missing)
detection_masks: [batch, max_detections, mask_height, mask_width]
(optional)
detection_keypoints: [batch, max_detections, num_keypoints, 2]
(optional)
detection_keypoint_scores: [batch, max_detections, num_keypoints]
(optional)
detection_surface_coords: [batch, max_detections, mask_height,
mask_width, 2] (optional)
num_detections: [batch]
In addition to the above fields this stage also outputs the following
raw tensors:
raw_detection_boxes: [batch, total_detections, 4] tensor containing
all detection boxes from `prediction_dict` in the format
[ymin, xmin, ymax, xmax] and normalized co-ordinates.
raw_detection_scores: [batch, total_detections,
num_classes_with_background] tensor of class score logits for
raw detection boxes.
"""
pass
@abc.abstractmethod
def loss(self, prediction_dict, true_image_shapes):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding predicted tensors
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
a dictionary mapping strings (loss names) to scalar tensors representing
loss values.
"""
pass
def provide_groundtruth(
self,
groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list=None,
groundtruth_mask_weights_list=None,
groundtruth_keypoints_list=None,
groundtruth_keypoint_visibilities_list=None,
groundtruth_dp_num_points_list=None,
groundtruth_dp_part_ids_list=None,
groundtruth_dp_surface_coords_list=None,
groundtruth_track_ids_list=None,
groundtruth_temporal_offsets_list=None,
groundtruth_track_match_flags_list=None,
groundtruth_weights_list=None,
groundtruth_confidences_list=None,
groundtruth_is_crowd_list=None,
groundtruth_group_of_list=None,
groundtruth_area_list=None,
is_annotated_list=None,
groundtruth_labeled_classes=None,
groundtruth_verified_neg_classes=None,
groundtruth_not_exhaustive_classes=None,
groundtruth_keypoint_depths_list=None,
groundtruth_keypoint_depth_weights_list=None,
groundtruth_image_classes=None,
training_step=None):
"""Provide groundtruth tensors.
Args:
groundtruth_boxes_list: a list of 2-D tf.float32 tensors of shape
[num_boxes, 4] containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max]
format and assumed to be normalized and clipped
relative to the image window with y_min <= y_max and x_min <= x_max.
groundtruth_classes_list: a list of 2-D tf.float32 one-hot (or k-hot)
tensors of shape [num_boxes, num_classes] containing the class targets
with the 0th index assumed to map to the first non-background class.
groundtruth_masks_list: a list of 3-D tf.float32 tensors of
shape [num_boxes, height_in, width_in] containing instance
masks with values in {0, 1}. If None, no masks are provided.
Mask resolution `height_in`x`width_in` must agree with the resolution
of the input image tensor provided to the `preprocess` function.
groundtruth_mask_weights_list: a list of 1-D tf.float32 tensors of shape
[num_boxes] with weights for each instance mask.
groundtruth_keypoints_list: a list of 3-D tf.float32 tensors of
shape [num_boxes, num_keypoints, 2] containing keypoints.
Keypoints are assumed to be provided in normalized coordinates and
missing keypoints should be encoded as NaN (but it is recommended to use
`groundtruth_keypoint_visibilities_list`).
groundtruth_keypoint_visibilities_list: a list of 3-D tf.bool tensors
of shape [num_boxes, num_keypoints] containing keypoint visibilities.
groundtruth_dp_num_points_list: a list of 1-D tf.int32 tensors of shape
[num_boxes] containing the number of DensePose sampled points.
groundtruth_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape
[num_boxes, max_sampled_points] containing the DensePose part ids
(0-indexed) for each sampled point. Note that there may be padding.
groundtruth_dp_surface_coords_list: a list of 3-D tf.float32 tensors of
shape [num_boxes, max_sampled_points, 4] containing the DensePose
surface coordinates for each sampled point. Note that there may be
padding.
groundtruth_track_ids_list: a list of 1-D tf.int32 tensors of shape
[num_boxes] containing the track IDs of groundtruth objects.
groundtruth_temporal_offsets_list: a list of 2-D tf.float32 tensors
of shape [num_boxes, 2] containing the spatial offsets of objects'
centers compared with the previous frame.
groundtruth_track_match_flags_list: a list of 1-D tf.float32 tensors
of shape [num_boxes] containing 0-1 flags that indicate if an object
has existed in the previous frame.
groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape
[num_boxes, num_classes] containing class confidences for groundtruth
boxes.
groundtruth_is_crowd_list: A list of 1-D tf.bool tensors of shape
[num_boxes] containing is_crowd annotations.
groundtruth_group_of_list: A list of 1-D tf.bool tensors of shape
[num_boxes] containing group_of annotations.
groundtruth_area_list: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing the area (in the original absolute coordinates)
of the annotations.
is_annotated_list: A list of scalar tf.bool tensors indicating whether
images have been labeled or not.
groundtruth_labeled_classes: A list of 1-D tf.float32 tensors of shape
[num_classes], containing label indices encoded as k-hot of the classes
that are exhaustively annotated.
groundtruth_verified_neg_classes: A list of 1-D tf.float32 tensors of
shape [num_classes], containing a K-hot representation of classes
which were verified as not present in the image.
groundtruth_not_exhaustive_classes: A list of 1-D tf.float32 tensors of
shape [num_classes], containing a K-hot representation of classes
which don't have all of their instances marked exhaustively.
groundtruth_keypoint_depths_list: a list of 2-D tf.float32 tensors
of shape [num_boxes, num_keypoints] containing keypoint relative depths.
groundtruth_keypoint_depth_weights_list: a list of 2-D tf.float32 tensors
of shape [num_boxes, num_keypoints] containing the weights of the
relative depths.
groundtruth_image_classes: A list of 1-D tf.float32 tensors of shape
[num_classes], containing label indices encoded as k-hot of the classes
that are present or not present in the image.
training_step: An integer denoting the current training step. This is
useful when models want to anneal loss terms.
"""
self._groundtruth_lists[fields.BoxListFields.boxes] = groundtruth_boxes_list
self._groundtruth_lists[
fields.BoxListFields.classes] = groundtruth_classes_list
if groundtruth_weights_list:
self._groundtruth_lists[fields.BoxListFields.
weights] = groundtruth_weights_list
if groundtruth_confidences_list:
self._groundtruth_lists[fields.BoxListFields.
confidences] = groundtruth_confidences_list
if groundtruth_masks_list:
self._groundtruth_lists[
fields.BoxListFields.masks] = groundtruth_masks_list
if groundtruth_mask_weights_list:
self._groundtruth_lists[
fields.BoxListFields.mask_weights] = groundtruth_mask_weights_list
if groundtruth_keypoints_list:
self._groundtruth_lists[
fields.BoxListFields.keypoints] = groundtruth_keypoints_list
if groundtruth_keypoint_visibilities_list:
self._groundtruth_lists[
fields.BoxListFields.keypoint_visibilities] = (
groundtruth_keypoint_visibilities_list)
if groundtruth_keypoint_depths_list:
self._groundtruth_lists[
fields.BoxListFields.keypoint_depths] = (
groundtruth_keypoint_depths_list)
if groundtruth_keypoint_depth_weights_list:
self._groundtruth_lists[
fields.BoxListFields.keypoint_depth_weights] = (
groundtruth_keypoint_depth_weights_list)
if groundtruth_dp_num_points_list:
self._groundtruth_lists[
fields.BoxListFields.densepose_num_points] = (
groundtruth_dp_num_points_list)
if groundtruth_dp_part_ids_list:
self._groundtruth_lists[
fields.BoxListFields.densepose_part_ids] = (
groundtruth_dp_part_ids_list)
if groundtruth_dp_surface_coords_list:
self._groundtruth_lists[
fields.BoxListFields.densepose_surface_coords] = (
groundtruth_dp_surface_coords_list)
if groundtruth_track_ids_list:
self._groundtruth_lists[
fields.BoxListFields.track_ids] = groundtruth_track_ids_list
if groundtruth_temporal_offsets_list:
self._groundtruth_lists[
fields.BoxListFields.temporal_offsets] = (
groundtruth_temporal_offsets_list)
if groundtruth_track_match_flags_list:
self._groundtruth_lists[
fields.BoxListFields.track_match_flags] = (
groundtruth_track_match_flags_list)
if groundtruth_is_crowd_list:
self._groundtruth_lists[
fields.BoxListFields.is_crowd] = groundtruth_is_crowd_list
if groundtruth_group_of_list:
self._groundtruth_lists[
fields.BoxListFields.group_of] = groundtruth_group_of_list
if groundtruth_area_list:
self._groundtruth_lists[
fields.InputDataFields.groundtruth_area] = groundtruth_area_list
if is_annotated_list:
self._groundtruth_lists[
fields.InputDataFields.is_annotated] = is_annotated_list
if groundtruth_labeled_classes:
self._groundtruth_lists[
fields.InputDataFields
.groundtruth_labeled_classes] = groundtruth_labeled_classes
if groundtruth_verified_neg_classes:
self._groundtruth_lists[
fields.InputDataFields
.groundtruth_verified_neg_classes] = groundtruth_verified_neg_classes
if groundtruth_image_classes:
self._groundtruth_lists[
fields.InputDataFields
.groundtruth_image_classes] = groundtruth_image_classes
if groundtruth_not_exhaustive_classes:
self._groundtruth_lists[
fields.InputDataFields
.groundtruth_not_exhaustive_classes] = (
groundtruth_not_exhaustive_classes)
if training_step is not None:
self._training_step = training_step
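  # Illustrative usage sketch (not part of the original library code): for a
  # batch of two images, groundtruth is provided as per-image lists, e.g.
  #   model.provide_groundtruth(
  #       groundtruth_boxes_list=[boxes_0, boxes_1],        # each [N_i, 4]
  #       groundtruth_classes_list=[classes_0, classes_1],  # each [N_i, K]
  #       groundtruth_weights_list=[weights_0, weights_1])  # each [N_i]
  # where boxes_i, classes_i and weights_i are hypothetical per-image tensors.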
@abc.abstractmethod
def regularization_losses(self):
"""Returns a list of regularization losses for this model.
Returns a list of regularization losses for this model that the estimator
needs to use during training/optimization.
Returns:
A list of regularization loss tensors.
"""
pass
@abc.abstractmethod
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
"""Returns a map of variables to load from a foreign checkpoint.
Returns a map of variable names to load from a checkpoint to variables in
the model graph. This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
should have the same parameters as this detection model with exception of
the num_classes parameter.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
load_all_detection_checkpoint_vars: whether to load all variables (when
`fine_tune_checkpoint_type` is `detection`). If False, only variables
within the feature extractor scope are included. Default False.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
pass
@abc.abstractmethod
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of variables to load from a foreign checkpoint.
Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
should have the same parameters as this detection model with exception of
the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running Tensorflow 2, whereas restore_map (above) is intended
to be used to restore Slim-based models when running Tensorflow 1.x.
TODO(jonathanhuang,rathodv): Check tf_version and raise unimplemented
error for both restore_map and restore_from_objects depending on version.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
"""
pass
@abc.abstractmethod
def updates(self):
"""Returns a list of update operators for this model.
Returns a list of update operators for this model that must be executed at
each training step. The estimator's train op needs to have a control
dependency on these updates.
Returns:
A list of update operators.
"""
pass
def call(self, images):
"""Returns detections from a batch of images.
This method calls the preprocess, predict and postprocess function
sequentially and returns the output.
Args:
images: a [batch_size, height, width, channels] float tensor.
Returns:
      detections: The dict of tensors returned by the postprocess function.
"""
preprocessed_images, shapes = self.preprocess(images)
prediction_dict = self.predict(preprocessed_images, shapes)
return self.postprocess(prediction_dict, shapes)
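# Illustrative sketch (not part of the original library code): the inference
# flow that `call` wires together, written out explicitly for a hypothetical
# concrete subclass instance `detection_model` and an image batch `images`.
def _example_inference(detection_model, images):
  preprocessed_images, true_shapes = detection_model.preprocess(images)
  prediction_dict = detection_model.predict(preprocessed_images, true_shapes)
  return detection_model.postprocess(prediction_dict, true_shapes)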
| 26,202 | 43.187184 | 80 | py |
models | models-master/research/object_detection/core/freezable_sync_batch_norm.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A freezable batch norm layer that uses Keras sync batch normalization."""
import tensorflow as tf
class FreezableSyncBatchNorm(tf.keras.layers.experimental.SyncBatchNormalization
):
"""Sync Batch normalization layer (Ioffe and Szegedy, 2014).
This is a `freezable` batch norm layer that supports setting the `training`
parameter in the __init__ method rather than having to set it either via
the Keras learning phase or via the `call` method parameter. This layer will
  forward all other parameters to the Keras `SyncBatchNormalization` layer.
  This class is necessary because Object Detection model training sometimes
  requires batch normalization layers to be `frozen` and used as if it were
  evaluation time, despite still training (and potentially using dropout
  layers).
Like the default Keras SyncBatchNormalization layer, this will normalize the
activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
"""
def __init__(self, training=None, **kwargs):
"""Constructor.
Args:
training: If False, the layer will normalize using the moving average and
std. dev, without updating the learned avg and std. dev.
If None or True, the layer will follow the keras SyncBatchNormalization
layer strategy of checking the Keras learning phase at `call` time to
decide what to do.
**kwargs: The keyword arguments to forward to the keras
SyncBatchNormalization layer constructor.
"""
super(FreezableSyncBatchNorm, self).__init__(**kwargs)
self._training = training
def call(self, inputs, training=None):
# Override the call arg only if the batchnorm is frozen. (Ignore None)
if self._training is False: # pylint: disable=g-bool-id-comparison
training = self._training
return super(FreezableSyncBatchNorm, self).call(inputs, training=training)
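# Illustrative usage sketch (not part of the original library code): a frozen
# sync batch norm layer. With training=False the stored moving statistics are
# always used; without freezing, the layer aggregates batch statistics across
# replicas when used under a tf.distribute strategy. The momentum value below
# is arbitrary.
def _example_frozen_sync_batch_norm_layer():
  return FreezableSyncBatchNorm(training=False, momentum=0.997)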
| 3,093 | 42.577465 | 80 | py |
models | models-master/research/object_detection/core/freezable_batch_norm_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.freezable_batch_norm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl.testing import parameterized
import numpy as np
from six.moves import zip
import tensorflow as tf
from object_detection.core import freezable_batch_norm
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from object_detection.core import freezable_sync_batch_norm
# pylint: enable=g-import-not-at-top
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FreezableBatchNormTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for FreezableBatchNorm operations."""
def _build_model(self, use_sync_batch_norm, training=None):
model = tf.keras.models.Sequential()
norm = None
if use_sync_batch_norm:
norm = freezable_sync_batch_norm.FreezableSyncBatchNorm(training=training,
input_shape=(10,),
momentum=0.8)
else:
norm = freezable_batch_norm.FreezableBatchNorm(training=training,
input_shape=(10,),
momentum=0.8)
model.add(norm)
return model, norm
def _copy_weights(self, source_weights, target_weights):
for source, target in zip(source_weights, target_weights):
target.assign(source)
def _train_freezable_batch_norm(self, training_mean, training_var,
use_sync_batch_norm):
model, _ = self._build_model(use_sync_batch_norm=use_sync_batch_norm)
model.compile(loss='mse', optimizer='sgd')
# centered on training_mean, variance training_var
train_data = np.random.normal(
loc=training_mean,
scale=training_var,
size=(1000, 10))
model.fit(train_data, train_data, epochs=4, verbose=0)
return model.weights
def _test_batchnorm_layer(
self, norm, should_be_training, test_data,
testing_mean, testing_var, training_arg, training_mean, training_var):
out_tensor = norm(tf.convert_to_tensor(test_data, dtype=tf.float32),
training=training_arg)
out = out_tensor
out -= norm.beta
out /= norm.gamma
if not should_be_training:
out *= training_var
out += (training_mean - testing_mean)
out /= testing_var
np.testing.assert_allclose(out.numpy().mean(), 0.0, atol=1.5e-1)
np.testing.assert_allclose(out.numpy().std(), 1.0, atol=1.5e-1)
@parameterized.parameters(True, False)
def test_batchnorm_freezing_training_none(self, use_sync_batch_norm):
training_mean = 5.0
training_var = 10.0
testing_mean = -10.0
testing_var = 5.0
# Initially train the batch norm, and save the weights
trained_weights = self._train_freezable_batch_norm(training_mean,
training_var,
use_sync_batch_norm)
# Load the batch norm weights, freezing training to True.
# Apply the batch norm layer to testing data and ensure it is normalized
# according to the batch statistics.
model, norm = self._build_model(use_sync_batch_norm, training=True)
self._copy_weights(trained_weights, model.weights)
# centered on testing_mean, variance testing_var
test_data = np.random.normal(
loc=testing_mean,
scale=testing_var,
size=(1000, 10))
# Test with training=True passed to the call method:
training_arg = True
should_be_training = True
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Reset the weights, because they may have been updating by
# running with training=True
self._copy_weights(trained_weights, model.weights)
# Test with training=False passed to the call method:
training_arg = False
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Test the layer in various Keras learning phase scopes:
training_arg = None
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
tf.keras.backend.set_learning_phase(True)
should_be_training = True
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Reset the weights, because they may have been updating by
# running with training=True
self._copy_weights(trained_weights, model.weights)
tf.keras.backend.set_learning_phase(False)
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
@parameterized.parameters(True, False)
def test_batchnorm_freezing_training_false(self, use_sync_batch_norm):
training_mean = 5.0
training_var = 10.0
testing_mean = -10.0
testing_var = 5.0
# Initially train the batch norm, and save the weights
trained_weights = self._train_freezable_batch_norm(training_mean,
training_var,
use_sync_batch_norm)
# Load the batch norm back up, freezing training to False.
# Apply the batch norm layer to testing data and ensure it is normalized
# according to the training data's statistics.
model, norm = self._build_model(use_sync_batch_norm, training=False)
self._copy_weights(trained_weights, model.weights)
# centered on testing_mean, variance testing_var
test_data = np.random.normal(
loc=testing_mean,
scale=testing_var,
size=(1000, 10))
# Make sure that the layer is never training
# Test with training=True passed to the call method:
training_arg = True
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Test with training=False passed to the call method:
training_arg = False
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Test the layer in various Keras learning phase scopes:
training_arg = None
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
tf.keras.backend.set_learning_phase(True)
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
tf.keras.backend.set_learning_phase(False)
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
if __name__ == '__main__':
tf.test.main()
| 8,681 | 38.643836 | 80 | py |
models | models-master/research/object_detection/core/box_predictor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Box predictor for object detectors.
Box predictors are classes that take a high level
image feature map as input and produce two predictions,
(1) a tensor encoding box locations, and
(2) a tensor encoding classes for each box.
These components are passed directly to loss functions
in our detection models.
These modules are separated from the main model since the same
few box predictor architectures are shared across many models.
"""
from abc import abstractmethod
import tensorflow.compat.v1 as tf
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
MASK_PREDICTIONS = 'mask_predictions'
class BoxPredictor(object):
"""BoxPredictor."""
def __init__(self, is_training, num_classes):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
"""
self._is_training = is_training
self._num_classes = num_classes
@property
def is_keras_model(self):
return False
@property
def num_classes(self):
return self._num_classes
def predict(self, image_features, num_predictions_per_location,
scope=None, **params):
"""Computes encoded object locations and corresponding confidences.
Takes a list of high level image feature maps as input and produces a list
of box encodings and a list of class scores where each element in the output
    lists corresponds to a feature map in the input list.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location: A list of integers representing the number
of box predictions to be made per spatial location for each feature map.
scope: Variable and Op scope name.
**params: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
Raises:
ValueError: If length of `image_features` is not equal to length of
`num_predictions_per_location`.
"""
if len(image_features) != len(num_predictions_per_location):
raise ValueError('image_feature and num_predictions_per_location must '
'be of same length, found: {} vs {}'.
format(len(image_features),
len(num_predictions_per_location)))
if scope is not None:
with tf.variable_scope(scope):
return self._predict(image_features, num_predictions_per_location,
**params)
return self._predict(image_features, num_predictions_per_location,
**params)
# TODO(rathodv): num_predictions_per_location could be moved to constructor.
# This is currently only used by ConvolutionalBoxPredictor.
@abstractmethod
def _predict(self, image_features, num_predictions_per_location, **params):
"""Implementations must override this method.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location: A list of integers representing the number
of box predictions to be made per spatial location for each feature map.
**params: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
pass
class KerasBoxPredictor(tf.keras.layers.Layer):
"""Keras-based BoxPredictor."""
def __init__(self, is_training, num_classes, freeze_batchnorm,
inplace_batchnorm_update, name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
"""
super(KerasBoxPredictor, self).__init__(name=name)
self._is_training = is_training
self._num_classes = num_classes
self._freeze_batchnorm = freeze_batchnorm
self._inplace_batchnorm_update = inplace_batchnorm_update
@property
def is_keras_model(self):
return True
@property
def num_classes(self):
return self._num_classes
def call(self, image_features, **kwargs):
"""Computes encoded object locations and corresponding confidences.
Takes a list of high level image feature maps as input and produces a list
of box encodings and a list of class scores where each element in the output
    lists corresponds to a feature map in the input list.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
**kwargs: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
return self._predict(image_features, **kwargs)
@abstractmethod
def _predict(self, image_features, **kwargs):
"""Implementations must override this method.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
**kwargs: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
raise NotImplementedError
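# Illustrative sketch (not part of the original library code): a toy Keras box
# predictor that emits constant-shaped outputs, showing the output dictionary
# contract described in the docstrings above. The number of anchors per
# location and the constructor arguments used below are arbitrary, e.g.
#   _ToyKerasBoxPredictor(is_training=False, num_classes=3,
#                         freeze_batchnorm=False,
#                         inplace_batchnorm_update=False)
class _ToyKerasBoxPredictor(KerasBoxPredictor):
  """Minimal example subclass; not intended for real models."""
  def _predict(self, image_features, **kwargs):
    batch_size = tf.shape(image_features[0])[0]
    num_anchors = 4
    return {
        BOX_ENCODINGS: [tf.zeros([batch_size, num_anchors, 4])],
        CLASS_PREDICTIONS_WITH_BACKGROUND: [
            tf.zeros([batch_size, num_anchors, self.num_classes + 1])
        ],
    }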
| 10,193 | 43.710526 | 80 | py |
models | models-master/research/object_detection/core/model_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.core import model
from object_detection.utils import test_case
class FakeModel(model.DetectionModel):
def __init__(self):
# sub-networks containing weights of different shapes.
self._network1 = tf.keras.Sequential([
tf.keras.layers.Conv2D(8, 1)
])
self._network2 = tf.keras.Sequential([
tf.keras.layers.Conv2D(16, 1)
])
super(FakeModel, self).__init__(num_classes=0)
def preprocess(self, images):
return images, tf.shape(images)
def predict(self, images, shapes):
return {'prediction': self._network2(self._network1(images))}
def postprocess(self, prediction_dict, shapes):
return prediction_dict
def loss(self):
return tf.constant(0.0)
def updates(self):
return []
def restore_map(self):
return {}
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def regularization_losses(self):
return []
class ModelTest(test_case.TestCase):
def test_model_call(self):
detection_model = FakeModel()
def graph_fn():
return detection_model(tf.zeros((1, 128, 128, 3)))
result = self.execute(graph_fn, [])
self.assertEqual(result['prediction'].shape,
(1, 128, 128, 16))
def test_freeze(self):
detection_model = FakeModel()
detection_model(tf.zeros((1, 128, 128, 3)))
net1_var_shapes = [tuple(var.get_shape().as_list()) for var in
detection_model._network1.trainable_variables]
del detection_model
detection_model = FakeModel()
detection_model._network2.trainable = False
detection_model(tf.zeros((1, 128, 128, 3)))
var_shapes = [tuple(var.get_shape().as_list()) for var in
detection_model._network1.trainable_variables]
self.assertEqual(set(net1_var_shapes), set(var_shapes))
if __name__ == '__main__':
tf.test.main()
| 2,744 | 25.911765 | 80 | py |
models | models-master/research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet based Faster R-CNN implementation in Keras.
See Deep Residual Learning for Image Recognition by He et al.
https://arxiv.org/abs/1512.03385
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models.keras_models import resnet_v1
from object_detection.utils import model_util
_RESNET_MODEL_CONV4_LAST_LAYERS = {
'resnet_v1_50': 'conv4_block6_out',
'resnet_v1_101': 'conv4_block23_out',
'resnet_v1_152': 'conv4_block36_out',
}
class FasterRCNNResnetKerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Faster R-CNN with Resnet feature extractor implementation."""
def __init__(self,
is_training,
resnet_v1_base_model,
resnet_v1_base_model_name,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
resnet_v1_base_model: base resnet v1 network to use. One of
the resnet_v1.resnet_v1_{50,101,152} models.
resnet_v1_base_model_name: model name under which to construct resnet v1.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
Raises:
      ValueError: If `first_stage_features_stride` is not 16.
"""
if first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 16.')
super(FasterRCNNResnetKerasFeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
weight_decay)
self.classification_backbone = None
self._variable_dict = {}
self._resnet_v1_base_model = resnet_v1_base_model
self._resnet_v1_base_model_name = resnet_v1_base_model_name
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def get_proposal_feature_extractor_model(self, name=None):
"""Returns a model that extracts first stage RPN features.
Extracts features using the first half of the Resnet v1 network.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes preprocessed_inputs:
A [batch, height, width, channels] float32 tensor
representing a batch of images.
And returns rpn_feature_map:
A tensor with shape [batch, height, width, depth]
"""
if not self.classification_backbone:
self.classification_backbone = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=None,
weight_decay=self._weight_decay,
classes=None,
weights=None,
include_top=False
)
with tf.name_scope(name):
with tf.name_scope('ResnetV1'):
conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[
self._resnet_v1_base_model_name]
proposal_features = self.classification_backbone.get_layer(
name=conv4_last_layer).output
keras_model = tf.keras.Model(
inputs=self.classification_backbone.inputs,
outputs=proposal_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
def get_box_classifier_feature_extractor_model(self, name=None):
"""Returns a model that extracts second stage box classifier features.
This function reconstructs the "second half" of the ResNet v1
network after the part defined in `get_proposal_feature_extractor_model`.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes proposal_feature_maps:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
And returns proposal_classifier_features:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
if not self.classification_backbone:
self.classification_backbone = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=None,
weight_decay=self._weight_decay,
classes=None,
weights=None,
include_top=False
)
with tf.name_scope(name):
with tf.name_scope('ResnetV1'):
conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[
self._resnet_v1_base_model_name]
proposal_feature_maps = self.classification_backbone.get_layer(
name=conv4_last_layer).output
proposal_classifier_features = self.classification_backbone.get_layer(
name='conv5_block3_out').output
keras_model = model_util.extract_submodel(
model=self.classification_backbone,
inputs=proposal_feature_maps,
outputs=proposal_classifier_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
class FasterRCNNResnet50KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet50 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_50,
resnet_v1_base_model_name='resnet_v1_50',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
class FasterRCNNResnet101KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet101 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet101KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_101,
resnet_v1_base_model_name='resnet_v1_101',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
class FasterRCNNResnet152KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet152 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet152KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_152,
resnet_v1_base_model_name='resnet_v1_152',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
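# Illustrative usage sketch (not part of the original module): wiring the
# Resnet50 variant through the two sub-models documented above. The 640x640
# input size and the 7x7 proposal crop size are example values; the real crop
# size comes from the Faster R-CNN meta-architecture configuration.
def _example_resnet50_feature_extraction():
  extractor = FasterRCNNResnet50KerasFeatureExtractor(is_training=False)
  images = tf.random_uniform([1, 640, 640, 3], maxval=255.0)
  preprocessed = extractor.preprocess(images)
  rpn_model = extractor.get_proposal_feature_extractor_model(
      name='FirstStageFeatureExtractor')
  # conv4 output at stride 16: expected shape [1, 40, 40, 1024].
  rpn_feature_map = rpn_model(preprocessed)
  box_model = extractor.get_box_classifier_feature_extractor_model(
      name='SecondStageFeatureExtractor')
  # Per-proposal crops of the RPN feature map feed the conv5 blocks.
  proposal_crops = tf.random_uniform([8, 7, 7, 1024])
  proposal_classifier_features = box_model(proposal_crops)
  return rpn_feature_map, proposal_classifier_features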
| 9,440 | 36.023529 | 80 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_testbase.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 FPN feature extractors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.utils import test_utils
class SSDResnetFPNFeatureExtractorTestBase(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
"""Helper test class for SSD Resnet v1 FPN feature extractors."""
@abc.abstractmethod
def _resnet_scope_name(self):
pass
@abc.abstractmethod
def _fpn_scope_name(self):
return 'fpn'
@abc.abstractmethod
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
min_depth=32,
use_keras=False):
pass
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=self.is_tf2())
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(
self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=self.is_tf2())
def test_extract_features_returns_correct_shapes_with_depth_multiplier(
self):
image_height = 256
image_width = 256
depth_multiplier = 0.5
expected_num_channels = int(256 * depth_multiplier)
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, expected_num_channels),
(2, 16, 16, expected_num_channels),
(2, 8, 8, expected_num_channels),
(2, 4, 4, expected_num_channels),
(2, 2, 2, expected_num_channels)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=self.is_tf2())
def test_extract_features_returns_correct_shapes_with_min_depth(
self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
min_depth = 320
expected_feature_map_shape = [(2, 32, 32, min_depth),
(2, 16, 16, min_depth),
(2, 8, 8, min_depth),
(2, 4, 4, min_depth),
(2, 2, 2, min_depth)]
with test_utils.GraphContextOrNone() as g:
image_tensor = tf.random.uniform([2, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, min_depth=min_depth,
use_keras=self.is_tf2())
def graph_fn():
if self.is_tf2():
return feature_extractor(image_tensor)
return feature_extractor.extract_features(image_tensor)
feature_maps = self.execute(graph_fn, [], graph=g)
for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shape):
self.assertAllEqual(feature_map.shape, expected_shape)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 254
image_width = 254
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=self.is_tf2())
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=self.is_tf2())
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image_np = np.random.rand(4, image_height, image_width, 3)
with test_utils.GraphContextOrNone() as g:
test_image = tf.constant(test_image_np)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=self.is_tf2())
def graph_fn():
preprocessed_image = feature_extractor.preprocess(test_image)
return preprocessed_image
preprocessed_image_out = self.execute(graph_fn, [], graph=g)
self.assertAllClose(preprocessed_image_out,
test_image_np - [[123.68, 116.779, 103.939]])
def test_variables_only_created_in_scope(self):
if self.is_tf2():
self.skipTest('test_variables_only_created_in_scope is only tf1')
depth_multiplier = 1
pad_to_multiple = 1
scope_name = self._resnet_scope_name()
self.check_feature_extractor_variables_under_scope(
depth_multiplier,
pad_to_multiple,
scope_name,
use_keras=self.is_tf2())
def test_variable_count(self):
if self.is_tf2():
self.skipTest('test_variable_count is only tf1')
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier,
pad_to_multiple,
use_keras=self.is_tf2())
# The number of expected variables in resnet_v1_50, resnet_v1_101,
# and resnet_v1_152 is 279, 534, and 789 respectively.
expected_variables_len = 279
scope_name = self._resnet_scope_name()
if scope_name in ('ResNet101V1_FPN', 'resnet_v1_101'):
expected_variables_len = 534
elif scope_name in ('ResNet152V1_FPN', 'resnet_v1_152'):
expected_variables_len = 789
self.assertEqual(len(variables), expected_variables_len)
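# Illustrative note (not part of the original tests): the expected shapes in
# the 256x256 tests above follow from the FPN levels used by these extractors
# (strides 8, 16, 32, 64 and 128), i.e. image_size // stride per spatial
# dimension.
def _expected_fpn_spatial_sizes(image_size=256, strides=(8, 16, 32, 64, 128)):
  return [image_size // stride for stride in strides] # [32, 16, 8, 4, 2]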
| 7,538 | 38.062176 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v3_feature_extractor_testbase.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base test class for ssd_mobilenet_v3_feature_extractor."""
import abc
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
class _SsdMobilenetV3FeatureExtractorTestBase(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
"""Base class for MobilenetV3 tests."""
@abc.abstractmethod
def _get_input_sizes(self):
"""Return feature map sizes for the two inputs to SSD head."""
pass
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
input_feature_sizes = self._get_input_sizes()
expected_feature_map_shape = [(2, 8, 8, input_feature_sizes[0]),
(2, 4, 4, input_feature_sizes[1]),
(2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1,
256),
(2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_keras=False)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
input_feature_sizes = self._get_input_sizes()
expected_feature_map_shape = [(2, 19, 19, input_feature_sizes[0]),
(2, 10, 10, input_feature_sizes[1]),
(2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2,
256),
(2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_keras=False)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
input_feature_sizes = self._get_input_sizes()
expected_feature_map_shape = [(2, 20, 20, input_feature_sizes[0]),
(2, 10, 10, input_feature_sizes[1]),
(2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2,
256),
(2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_has_fused_batchnorm(self):
image_height = 40
image_width = 40
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
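# Illustrative note (not part of the original tests): with pad_to_multiple=32
# a 299x299 input is zero padded up to 320x320 before feature extraction,
# which is why the first feature map grows from 19x19 to 20x20 in the
# pad_to_multiple test above.
def _padded_dimension(size=299, multiple=32):
  return ((size + multiple - 1) // multiple) * multiple # 320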
| 4,543 | 39.212389 | 80 | py |
models | models-master/research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_inception_resnet_v2_keras_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return frcnn_inc_res.FasterRCNNInceptionResnetV2KerasFeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 299, 299, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 19, 19, 1088])
def test_extract_proposal_features_stride_eight(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=8)
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 28, 28, 1088])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1088])
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[2, 17, 17, 1088], maxval=255, dtype=tf.float32)
model = feature_extractor.get_box_classifier_feature_extractor_model(
name='TestScope')
proposal_classifier_features = (
model(proposal_feature_maps))
features_shape = tf.shape(proposal_classifier_features)
self.assertAllEqual(features_shape.numpy(), [2, 9, 9, 1536])
if __name__ == '__main__':
tf.test.main()
| 3,528 | 42.567901 | 108 | py |
models | models-master/research/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing mobilenet_v2+FPN feature extractor for CenterNet."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import center_net_mobilenet_v2_fpn_feature_extractor
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMobileNetV2FPNFeatureExtractorTest(test_case.TestCase):
def test_center_net_mobilenet_v2_fpn_feature_extractor(self):
channel_means = (0., 0., 0.)
channel_stds = (1., 1., 1.)
bgr_ordering = False
model = (
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn(
channel_means, channel_stds, bgr_ordering,
use_separable_conv=False))
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs.shape, (8, 56, 56, 24))
# Pull out the FPN network.
output = model.get_layer('model_1')
for layer in output.layers:
# All convolution layers should be normal 2D convolutions.
if 'conv' in layer.name:
self.assertIsInstance(layer, tf.keras.layers.Conv2D)
def test_center_net_mobilenet_v2_fpn_feature_extractor_sep_conv(self):
channel_means = (0., 0., 0.)
channel_stds = (1., 1., 1.)
bgr_ordering = False
model = (
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn(
channel_means, channel_stds, bgr_ordering, use_separable_conv=True))
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs.shape, (8, 56, 56, 24))
# Pull out the FPN network.
backbone = model.get_layer('model')
first_conv = backbone.get_layer('Conv1')
self.assertEqual(32, first_conv.filters)
# Pull out the FPN network.
output = model.get_layer('model_1')
for layer in output.layers:
# Convolution layers with kernel size not equal to (1, 1) should be
# separable 2D convolutions.
if 'conv' in layer.name and layer.kernel_size != (1, 1):
self.assertIsInstance(layer, tf.keras.layers.SeparableConv2D)
def test_center_net_mobilenet_v2_fpn_feature_extractor_depth_multiplier(self):
channel_means = (0., 0., 0.)
channel_stds = (1., 1., 1.)
bgr_ordering = False
model = (
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn(
channel_means, channel_stds, bgr_ordering, use_separable_conv=True,
depth_multiplier=2.0))
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs.shape, (8, 56, 56, 24))
# Pull out the FPN network.
backbone = model.get_layer('model')
first_conv = backbone.get_layer('Conv1')
# Note that the first layer typically has 32 filters, but this model has
# a depth multiplier of 2.
self.assertEqual(64, first_conv.filters)
def test_center_net_mobilenet_v2_fpn_feature_extractor_interpolation(self):
channel_means = (0., 0., 0.)
channel_stds = (1., 1., 1.)
bgr_ordering = False
model = (
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn(
channel_means, channel_stds, bgr_ordering, use_separable_conv=True,
upsampling_interpolation='bilinear'))
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs.shape, (8, 56, 56, 24))
# Verify the upsampling layers in the FPN use 'bilinear' interpolation.
fpn = model.get_layer('model_1')
for layer in fpn.layers:
if 'up_sampling2d' in layer.name:
self.assertEqual('bilinear', layer.interpolation)
if __name__ == '__main__':
tf.test.main()
| 4,896 | 35.819549 | 81 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_fpn_keras_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Keras-based MobilenetV1 FPN Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import mobilenet_v1
from object_detection.models.keras_models import model_utils
from object_detection.utils import ops
from object_detection.utils import shape_utils
# A modified config of mobilenet v1 that makes it more detection friendly.
def _create_modified_mobilenet_config():
conv_def_block_12 = model_utils.ConvDefs(conv_name='conv_pw_12', filters=512)
conv_def_block_13 = model_utils.ConvDefs(conv_name='conv_pw_13', filters=256)
return [conv_def_block_12, conv_def_block_13]
class SSDMobileNetV1FpnKerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based MobilenetV1 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False,
name=None):
"""SSD Keras based FPN feature extractor Mobilenet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to MobileNet v1 layers
{Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise,
Conv2d_13_pointwise}, respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
        FPN. FPN construction uses feature maps starting from fpn_min_level
        up to fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: whether to use depthwise convolutions. Default is False.
      use_native_resize_op: Whether to use tf.image.resize_nearest_neighbor
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDMobileNetV1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._conv_defs = None
if self._use_depthwise:
self._conv_defs = _create_modified_mobilenet_config()
self._use_native_resize_op = use_native_resize_op
self._feature_blocks = [
'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise',
'Conv2d_13_pointwise'
]
self.classification_backbone = None
self._fpn_features_generator = None
self._coarse_feature_layers = []
def build(self, input_shape):
full_mobilenet_v1 = mobilenet_v1.mobilenet_v1(
batchnorm_training=(self._is_training and not self._freeze_batchnorm),
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
weights=None,
use_explicit_padding=self._use_explicit_padding,
alpha=self._depth_multiplier,
min_depth=self._min_depth,
conv_defs=self._conv_defs,
include_top=False)
conv2d_3_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_3_relu').output
conv2d_5_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_5_relu').output
conv2d_11_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_11_relu').output
conv2d_13_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_13_relu').output
self.classification_backbone = tf.keras.Model(
inputs=full_mobilenet_v1.inputs,
outputs=[conv2d_3_pointwise, conv2d_5_pointwise,
conv2d_11_pointwise, conv2d_13_pointwise]
)
# pylint:disable=g-long-lambda
self._depth_fn = lambda d: max(
int(d * self._depth_multiplier), self._min_depth)
self._base_fpn_max_level = min(self._fpn_max_level, 5)
self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
self._fpn_features_generator = (
feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=self._num_levels,
depth=self._depth_fn(self._additional_layer_depth),
use_depthwise=self._use_depthwise,
use_explicit_padding=self._use_explicit_padding,
use_native_resize_op=self._use_native_resize_op,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
# Construct coarse feature layers
padding = 'VALID' if self._use_explicit_padding else 'SAME'
kernel_size = 3
stride = 2
for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1):
coarse_feature_layers = []
if self._use_explicit_padding:
def fixed_padding(features, kernel_size=kernel_size):
return ops.fixed_padding(features, kernel_size)
coarse_feature_layers.append(tf.keras.layers.Lambda(
fixed_padding, name='fixed_padding'))
layer_name = 'bottom_up_Conv2d_{}'.format(
i - self._base_fpn_max_level + 13)
conv_block = feature_map_generators.create_conv_block(
self._use_depthwise, kernel_size, padding, stride, layer_name,
self._conv_hyperparams, self._is_training, self._freeze_batchnorm,
self._depth_fn(self._additional_layer_depth))
coarse_feature_layers.extend(conv_block)
self._coarse_feature_layers.append(coarse_feature_layers)
self.built = True
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
image_features = self.classification_backbone(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
feature_block_list = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_block_list.append(self._feature_blocks[level - 2])
feature_start_index = len(self._feature_blocks) - self._num_levels
fpn_input_image_features = [
(key, image_features[feature_start_index + index])
for index, key in enumerate(feature_block_list)]
fpn_features = self._fpn_features_generator(fpn_input_image_features)
feature_maps = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_{}'.format(
self._feature_blocks[level - 2])])
last_feature_map = fpn_features['top_down_{}'.format(
self._feature_blocks[self._base_fpn_max_level - 2])]
for coarse_feature_layers in self._coarse_feature_layers:
for layer in coarse_feature_layers:
last_feature_map = layer(last_feature_map)
feature_maps.append(last_feature_map)
return feature_maps
def restore_from_classification_checkpoint_fn(self, feature_extractor_scope):
"""Returns a map for restoring from an (object-based) checkpoint.
Args:
feature_extractor_scope: A scope name for the feature extractor (unused).
Returns:
A dict mapping keys to Keras models
"""
return {'feature_extractor': self.classification_backbone}
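# Illustrative note (not part of the original module): preprocess() above maps
# pixel values from [0, 255] into [-1, 1]. With the default fpn_min_level=3 and
# fpn_max_level=7, FPN levels 3-5 come from 'Conv2d_5_pointwise',
# 'Conv2d_11_pointwise' and 'Conv2d_13_pointwise', and levels 6 and 7 are added
# by the stride-2 'bottom_up_Conv2d_*' blocks constructed in build().
def _example_preprocess_range():
  low = (2.0 / 255.0) * 0.0 - 1.0     # -1.0
  high = (2.0 / 255.0) * 255.0 - 1.0  # 1.0
  return low, high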
| 11,195 | 42.734375 | 80 | py |
models | models-master/research/object_detection/models/feature_map_generators.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to generate a list of feature maps based on image features.
Provides several feature map generators that can be used to build object
detection feature extractors.
Object detection feature extractors are usually built by stacking two
components: a base feature extractor such as Inception V3 and a feature map
generator.
Feature map generators build on the base feature extractors and produce a list
of final feature maps.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.utils import ops
from object_detection.utils import shape_utils
# Activation bound used for TPU v1. Activations will be clipped to
# [-ACTIVATION_BOUND, ACTIVATION_BOUND] when training with
# use_bounded_activations enabled.
ACTIVATION_BOUND = 6.0
def get_depth_fn(depth_multiplier, min_depth):
"""Builds a callable to compute depth (output channels) of conv filters.
Args:
depth_multiplier: a multiplier for the nominal depth.
min_depth: a lower bound on the depth of filters.
Returns:
A callable that takes in a nominal depth and returns the depth to use.
"""
def multiply_depth(depth):
new_depth = int(depth * depth_multiplier)
return max(new_depth, min_depth)
return multiply_depth
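# Illustrative sketch (not part of the original module): the returned callable
# applies the multiplier first and then enforces the floor, e.g. with
# depth_multiplier=0.5 and min_depth=16 a nominal depth of 64 becomes 32 while
# 24 becomes 16.
def _example_depth_fn():
  depth_fn = get_depth_fn(depth_multiplier=0.5, min_depth=16)
  return depth_fn(64), depth_fn(24)  # (32, 16)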
def create_conv_block(
use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams,
is_training, freeze_batchnorm, depth):
"""Create Keras layers for depthwise & non-depthwise convolutions.
Args:
use_depthwise: Whether to use depthwise separable conv instead of regular
conv.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
filters. Can be an int if both values are the same.
padding: One of 'VALID' or 'SAME'.
stride: A list of length 2: [stride_height, stride_width], specifying the
convolution stride. Can be an int if both strides are the same.
layer_name: String. The name of the layer.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Indicates whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
depth: Depth of output feature maps.
Returns:
A list of conv layers.
"""
layers = []
if use_depthwise:
kwargs = conv_hyperparams.params()
# Both the regularizer and initializer apply to the depthwise layer,
# so we remap the kernel_* to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
layers.append(
tf.keras.layers.SeparableConv2D(
depth, [kernel_size, kernel_size],
depth_multiplier=1,
padding=padding,
strides=stride,
name=layer_name + '_depthwise_conv',
**kwargs))
else:
layers.append(tf.keras.layers.Conv2D(
depth,
[kernel_size, kernel_size],
padding=padding,
strides=stride,
name=layer_name + '_conv',
**conv_hyperparams.params()))
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
layers.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
return layers
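# Illustrative helper sketch (not part of the original module): the list
# returned by create_conv_block is [conv (or separable conv), batch norm,
# activation], and callers apply the layers in order.
def _apply_conv_block(conv_block, features):
  for layer in conv_block:
    features = layer(features)
  return features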
class KerasMultiResolutionFeatureMaps(tf.keras.Model):
"""Generates multi resolution feature maps from input image features.
A Keras model that generates multi-scale feature maps for detection as in the
SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.
More specifically, when called on inputs it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
By default convolution kernel size is set to 3, and it can be customized
by caller.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
When this feature generator object is called on input image_features:
Args:
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
def __init__(self,
feature_map_layout,
depth_multiplier,
min_depth,
insert_1x1_conv,
is_training,
conv_hyperparams,
freeze_batchnorm,
name=None):
"""Constructor.
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
If 'from_layer' is specified, the specified feature map is directly used
        as a box predictor layer, and the layer_depth is directly inferred from
the feature map (instead of using the provided 'layer_depth' parameter).
In this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution
operations. Note that the current implementation only supports
generating new layers using convolutions of stride 2 (resulting in a
spatial resolution reduction by a factor of 2), and will be extended to
a more flexible design. Convolution kernel size is set to 3 by default,
        and can be customized by 'conv_kernel_size' parameter (similarly,
'conv_kernel_size' should be set to -1 if 'from_layer' is specified).
The created convolution operation will be a normal 2D convolution by
default, and a depthwise convolution followed by 1x1 convolution if
'use_depthwise' is set to True.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1
convolution should be inserted before shrinking the feature map.
is_training: Indicates whether the feature generator is in training mode.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(KerasMultiResolutionFeatureMaps, self).__init__(name=name)
self.feature_map_layout = feature_map_layout
self.convolutions = []
depth_fn = get_depth_fn(depth_multiplier, min_depth)
base_from_layer = ''
use_explicit_padding = False
if 'use_explicit_padding' in feature_map_layout:
use_explicit_padding = feature_map_layout['use_explicit_padding']
use_depthwise = False
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, from_layer in enumerate(feature_map_layout['from_layer']):
net = []
layer_depth = feature_map_layout['layer_depth'][index]
conv_kernel_size = 3
if 'conv_kernel_size' in feature_map_layout:
conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
if from_layer:
base_from_layer = from_layer
else:
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth // 2))
net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth // 2),
[1, 1],
padding='SAME',
strides=1,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
base_from_layer, index, conv_kernel_size, conv_kernel_size,
depth_fn(layer_depth))
stride = 2
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
# We define this function here while capturing the value of
# conv_kernel_size, to avoid holding a reference to the loop variable
# conv_kernel_size inside of a lambda function
def fixed_padding(features, kernel_size=conv_kernel_size):
return ops.fixed_padding(features, kernel_size)
net.append(tf.keras.layers.Lambda(fixed_padding))
# TODO(rathodv): Add some utilities to simplify the creation of
# Depthwise & non-depthwise convolutions w/ normalization & activations
if use_depthwise:
net.append(tf.keras.layers.DepthwiseConv2D(
[conv_kernel_size, conv_kernel_size],
depth_multiplier=1,
padding=padding,
strides=stride,
name=layer_name + '_depthwise_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_depthwise_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name + '_depthwise'))
net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth), [1, 1],
padding='SAME',
strides=1,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
else:
net.append(tf.keras.layers.Conv2D(
depth_fn(layer_depth),
[conv_kernel_size, conv_kernel_size],
padding=padding,
strides=stride,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
# Until certain bugs are fixed in checkpointable lists,
# this net must be appended only once it's been filled with layers
self.convolutions.append(net)
def call(self, image_features):
"""Generate the multi-resolution feature maps.
Executed when calling the `.__call__` method on input.
Args:
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
feature_maps = []
feature_map_keys = []
for index, from_layer in enumerate(self.feature_map_layout['from_layer']):
if from_layer:
feature_map = image_features[from_layer]
feature_map_keys.append(from_layer)
else:
feature_map = feature_maps[-1]
for layer in self.convolutions[index]:
feature_map = layer(feature_map)
layer_name = self.convolutions[index][-1].name
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
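# Illustrative example (not part of the original module): a feature map layout
# in the style used by the SSD Mobilenet V1 extractor, where the first two maps
# are taken directly from the backbone and four extra stride-2 maps are
# generated on top of 'Conv2d_13_pointwise'.
_EXAMPLE_SSD_MOBILENET_V1_LAYOUT = {
    'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '',
                   ''],
    'layer_depth': [-1, -1, 512, 256, 256, 128],
}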
def multi_resolution_feature_maps(feature_map_layout, depth_multiplier,
min_depth, insert_1x1_conv, image_features,
pool_residual=False):
"""Generates multi resolution feature maps from input image features.
Generates multi-scale feature maps for detection as in the SSD papers by
Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.
More specifically, it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
By default convolution kernel size is set to 3, and it can be customized
by caller.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
If 'from_layer' is specified, the specified feature map is directly used
      as a box predictor layer, and the layer_depth is directly inferred from the
feature map (instead of using the provided 'layer_depth' parameter). In
this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution operations.
Note that the current implementation only supports generating new layers
using convolutions of stride 2 (resulting in a spatial resolution
reduction by a factor of 2), and will be extended to a more flexible
design. Convolution kernel size is set to 3 by default, and can be
      customized by 'conv_kernel_size' parameter (similarly, 'conv_kernel_size'
should be set to -1 if 'from_layer' is specified). The created convolution
operation will be a normal 2D convolution by default, and a depthwise
convolution followed by 1x1 convolution if 'use_depthwise' is set to True.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
should be inserted before shrinking the feature map.
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
pool_residual: Whether to add an average pooling layer followed by a
residual connection between subsequent feature maps when the channel
depth match. For example, with option 'layer_depth': [-1, 512, 256, 256],
a pooling and residual layer is added between the third and forth feature
map. This option is better used with Weight Shared Convolution Box
Predictor when all feature maps have the same channel depth to encourage
more consistent features across multi-scale feature maps.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
ValueError: if the number entries in 'from_layer' and
'layer_depth' do not match.
ValueError: if the generated layer does not have the same resolution
as specified.
"""
depth_fn = get_depth_fn(depth_multiplier, min_depth)
feature_map_keys = []
feature_maps = []
base_from_layer = ''
use_explicit_padding = False
if 'use_explicit_padding' in feature_map_layout:
use_explicit_padding = feature_map_layout['use_explicit_padding']
use_depthwise = False
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, from_layer in enumerate(feature_map_layout['from_layer']):
layer_depth = feature_map_layout['layer_depth'][index]
conv_kernel_size = 3
if 'conv_kernel_size' in feature_map_layout:
conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
if from_layer:
feature_map = image_features[from_layer]
base_from_layer = from_layer
feature_map_keys.append(from_layer)
else:
pre_layer = feature_maps[-1]
pre_layer_depth = pre_layer.get_shape().as_list()[3]
intermediate_layer = pre_layer
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth // 2))
intermediate_layer = slim.conv2d(
pre_layer,
depth_fn(layer_depth // 2), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
base_from_layer, index, conv_kernel_size, conv_kernel_size,
depth_fn(layer_depth))
stride = 2
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
intermediate_layer = ops.fixed_padding(
intermediate_layer, conv_kernel_size)
if use_depthwise:
feature_map = slim.separable_conv2d(
intermediate_layer,
None, [conv_kernel_size, conv_kernel_size],
depth_multiplier=1,
padding=padding,
stride=stride,
scope=layer_name + '_depthwise')
feature_map = slim.conv2d(
feature_map,
depth_fn(layer_depth), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
if pool_residual and pre_layer_depth == depth_fn(layer_depth):
if use_explicit_padding:
pre_layer = ops.fixed_padding(pre_layer, conv_kernel_size)
feature_map += slim.avg_pool2d(
pre_layer, [conv_kernel_size, conv_kernel_size],
padding=padding,
stride=2,
scope=layer_name + '_pool')
else:
feature_map = slim.conv2d(
intermediate_layer,
depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size],
padding=padding,
stride=stride,
scope=layer_name)
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
class KerasFpnTopDownFeatureMaps(tf.keras.Model):
"""Generates Keras based `top-down` feature maps for Feature Pyramid Networks.
See https://arxiv.org/abs/1612.03144 for details.
"""
def __init__(self,
num_levels,
depth,
is_training,
conv_hyperparams,
freeze_batchnorm,
use_depthwise=False,
use_explicit_padding=False,
use_bounded_activations=False,
use_native_resize_op=False,
scope=None,
name=None):
"""Constructor.
Args:
num_levels: the number of image features.
depth: depth of output feature maps.
is_training: Indicates whether the feature generator is in training mode.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
use_depthwise: whether to use depthwise separable conv instead of regular
conv.
use_explicit_padding: whether to use explicit padding.
use_bounded_activations: Whether or not to clip activations to range
[-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend
themselves to quantized inference.
use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op
        for the upsampling process instead of the reshape-and-broadcast
        implementation.
scope: A scope name to wrap this op under.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(KerasFpnTopDownFeatureMaps, self).__init__(name=name)
self.scope = scope if scope else 'top_down'
self.top_layers = []
self.residual_blocks = []
self.top_down_blocks = []
self.reshape_blocks = []
self.conv_layers = []
padding = 'VALID' if use_explicit_padding else 'SAME'
stride = 1
kernel_size = 3
def clip_by_value(features):
return tf.clip_by_value(features, -ACTIVATION_BOUND, ACTIVATION_BOUND)
# top layers
self.top_layers.append(tf.keras.layers.Conv2D(
depth, [1, 1], strides=stride, padding=padding,
name='projection_%d' % num_levels,
**conv_hyperparams.params(use_bias=True)))
if use_bounded_activations:
self.top_layers.append(tf.keras.layers.Lambda(
clip_by_value, name='clip_by_value'))
for level in reversed(list(range(num_levels - 1))):
# to generate residual from image features
residual_net = []
# to preprocess top_down (the image feature map from last layer)
top_down_net = []
# to reshape top_down according to residual if necessary
reshaped_residual = []
# to apply convolution layers to feature map
conv_net = []
# residual block
residual_net.append(tf.keras.layers.Conv2D(
depth, [1, 1], padding=padding, strides=1,
name='projection_%d' % (level + 1),
**conv_hyperparams.params(use_bias=True)))
if use_bounded_activations:
residual_net.append(tf.keras.layers.Lambda(
clip_by_value, name='clip_by_value'))
# top-down block
# TODO (b/128922690): clean-up of ops.nearest_neighbor_upsampling
if use_native_resize_op:
def resize_nearest_neighbor(image):
image_shape = shape_utils.combined_static_and_dynamic_shape(image)
return tf.image.resize_nearest_neighbor(
image, [image_shape[1] * 2, image_shape[2] * 2])
top_down_net.append(tf.keras.layers.Lambda(
resize_nearest_neighbor, name='nearest_neighbor_upsampling'))
else:
def nearest_neighbor_upsampling(image):
return ops.nearest_neighbor_upsampling(image, scale=2)
top_down_net.append(tf.keras.layers.Lambda(
nearest_neighbor_upsampling, name='nearest_neighbor_upsampling'))
# reshape block
if use_explicit_padding:
def reshape(inputs):
residual_shape = tf.shape(inputs[0])
return inputs[1][:, :residual_shape[1], :residual_shape[2], :]
reshaped_residual.append(
tf.keras.layers.Lambda(reshape, name='reshape'))
# down layers
if use_bounded_activations:
conv_net.append(tf.keras.layers.Lambda(
clip_by_value, name='clip_by_value'))
if use_explicit_padding:
def fixed_padding(features, kernel_size=kernel_size):
return ops.fixed_padding(features, kernel_size)
conv_net.append(tf.keras.layers.Lambda(
fixed_padding, name='fixed_padding'))
layer_name = 'smoothing_%d' % (level + 1)
conv_block = create_conv_block(
use_depthwise, kernel_size, padding, stride, layer_name,
conv_hyperparams, is_training, freeze_batchnorm, depth)
conv_net.extend(conv_block)
self.residual_blocks.append(residual_net)
self.top_down_blocks.append(top_down_net)
self.reshape_blocks.append(reshaped_residual)
self.conv_layers.append(conv_net)
def call(self, image_features):
"""Generate the multi-resolution feature maps.
Executed when calling the `.__call__` method on input.
Args:
image_features: list of tuples of (tensor_name, image_feature_tensor).
        Spatial resolutions of successive tensors must reduce exactly by a
        factor of 2.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
output_feature_maps_list = []
output_feature_map_keys = []
with tf.name_scope(self.scope):
top_down = image_features[-1][1]
for layer in self.top_layers:
top_down = layer(top_down)
output_feature_maps_list.append(top_down)
output_feature_map_keys.append('top_down_%s' % image_features[-1][0])
num_levels = len(image_features)
for index, level in enumerate(reversed(list(range(num_levels - 1)))):
residual = image_features[level][1]
top_down = output_feature_maps_list[-1]
for layer in self.residual_blocks[index]:
residual = layer(residual)
for layer in self.top_down_blocks[index]:
top_down = layer(top_down)
for layer in self.reshape_blocks[index]:
top_down = layer([residual, top_down])
top_down += residual
for layer in self.conv_layers[index]:
top_down = layer(top_down)
output_feature_maps_list.append(top_down)
output_feature_map_keys.append('top_down_%s' % image_features[level][0])
return collections.OrderedDict(reversed(
list(zip(output_feature_map_keys, output_feature_maps_list))))
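def _keras_fpn_top_down_usage_example():
  """A minimal usage sketch for KerasFpnTopDownFeatureMaps.

  The hyperparams text proto, layer names and tensor shapes below are
  illustrative assumptions rather than values prescribed by this module; the
  only requirement is that successive inputs halve in spatial resolution.
  """
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  from object_detection.protos import hyperparams_pb2
  hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(
      """
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { truncated_normal_initializer { stddev: 0.03 } }
      activation: RELU_6
      batch_norm { scale: true }
      """, hyperparams_proto)
  fpn = KerasFpnTopDownFeatureMaps(
      num_levels=3,
      depth=256,
      is_training=False,
      conv_hyperparams=hyperparams_builder.KerasLayerHyperparams(
          hyperparams_proto),
      freeze_batchnorm=True)
  image_features = [
      ('block2', tf.zeros([2, 32, 32, 256])),
      ('block3', tf.zeros([2, 16, 16, 512])),
      ('block4', tf.zeros([2, 8, 8, 1024])),
  ]
  # Returns an OrderedDict keyed 'top_down_block2/3/4', highest resolution
  # first, each map with 256 channels.
  return fpn(image_features)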
def fpn_top_down_feature_maps(image_features,
depth,
use_depthwise=False,
use_explicit_padding=False,
use_bounded_activations=False,
scope=None,
use_native_resize_op=False):
"""Generates `top-down` feature maps for Feature Pyramid Networks.
See https://arxiv.org/abs/1612.03144 for details.
Args:
image_features: list of tuples of (tensor_name, image_feature_tensor).
      Spatial resolutions of successive tensors must reduce exactly by a
      factor of 2.
depth: depth of output feature maps.
use_depthwise: whether to use depthwise separable conv instead of regular
conv.
use_explicit_padding: whether to use explicit padding.
use_bounded_activations: Whether or not to clip activations to range
[-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend
themselves to quantized inference.
scope: A scope name to wrap this op under.
use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for
the upsampling process instead of reshape and broadcasting implementation.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
with tf.name_scope(scope, 'top_down'):
num_levels = len(image_features)
output_feature_maps_list = []
output_feature_map_keys = []
padding = 'VALID' if use_explicit_padding else 'SAME'
kernel_size = 3
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d], padding=padding, stride=1):
top_down = slim.conv2d(
image_features[-1][1],
depth, [1, 1], activation_fn=None, normalizer_fn=None,
scope='projection_%d' % num_levels)
if use_bounded_activations:
top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
output_feature_maps_list.append(top_down)
output_feature_map_keys.append(
'top_down_%s' % image_features[-1][0])
for level in reversed(list(range(num_levels - 1))):
if use_native_resize_op:
with tf.name_scope('nearest_neighbor_upsampling'):
top_down_shape = shape_utils.combined_static_and_dynamic_shape(
top_down)
top_down = tf.image.resize_nearest_neighbor(
top_down, [top_down_shape[1] * 2, top_down_shape[2] * 2])
else:
top_down = ops.nearest_neighbor_upsampling(top_down, scale=2)
residual = slim.conv2d(
image_features[level][1], depth, [1, 1],
activation_fn=None, normalizer_fn=None,
scope='projection_%d' % (level + 1))
if use_bounded_activations:
residual = tf.clip_by_value(residual, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
if use_explicit_padding:
# slice top_down to the same shape as residual
residual_shape = tf.shape(residual)
top_down = top_down[:, :residual_shape[1], :residual_shape[2], :]
top_down += residual
if use_bounded_activations:
top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
if use_depthwise:
conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1)
else:
conv_op = slim.conv2d
pre_output = top_down
if use_explicit_padding:
pre_output = ops.fixed_padding(pre_output, kernel_size)
output_feature_maps_list.append(conv_op(
pre_output,
depth, [kernel_size, kernel_size],
scope='smoothing_%d' % (level + 1)))
output_feature_map_keys.append('top_down_%s' % image_features[level][0])
return collections.OrderedDict(reversed(
list(zip(output_feature_map_keys, output_feature_maps_list))))
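def _fpn_top_down_usage_example():
  """A minimal usage sketch for fpn_top_down_feature_maps (TF1 graph mode).

  The layer names and shapes are illustrative assumptions; the inputs only
  need to halve in spatial resolution from one entry to the next.
  """
  image_features = [
      ('block2', tf.zeros([2, 32, 32, 256])),
      ('block3', tf.zeros([2, 16, 16, 512])),
      ('block4', tf.zeros([2, 8, 8, 1024])),
  ]
  # Returns an OrderedDict keyed 'top_down_block2/3/4' from highest to lowest
  # resolution, each map with `depth` channels.
  return fpn_top_down_feature_maps(image_features, depth=256)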
def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers,
image_features, replace_pool_with_conv=False):
"""Generates pooling pyramid feature maps.
  The pooling pyramid feature maps are motivated by
  multi_resolution_feature_maps. The main differences are that it is simpler
  and has fewer free parameters.
  More specifically:
   - Instead of using convolutions to shrink the feature map, it uses max
     pooling, which removes the convolution parameters entirely.
   - By pooling features from the larger map down to a single cell, it
     generates features in the same feature space.
   - Instead of independently making box predictions from individual maps, it
     shares the same classifier across the different feature maps, which
     reduces the "mis-calibration" across different scales.
See go/ppn-detection for more details.
Args:
base_feature_map_depth: Depth of the base feature before the max pooling.
num_layers: Number of layers used to make predictions. They are pooled
from the base feature.
image_features: A dictionary of handles to activation tensors from the
feature extractor.
replace_pool_with_conv: Whether or not to replace pooling operations with
convolutions in the PPN. Default is False.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
ValueError: image_features does not contain exactly one entry
"""
if len(image_features) != 1:
raise ValueError('image_features should be a dictionary of length 1.')
image_features = image_features[list(image_features.keys())[0]]
feature_map_keys = []
feature_maps = []
feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth
if base_feature_map_depth > 0:
image_features = slim.conv2d(
image_features,
base_feature_map_depth,
[1, 1], # kernel size
padding='SAME', stride=1, scope=feature_map_key)
    # Add a 1x1 max-pooling node (a no-op) immediately after the conv2d for
    # TPU v1 compatibility. Without this dummy op, the TPU runtime compiler
    # would fuse the convolution with the max-pooling below into a single
    # cycle, making it impossible to retrieve the conv2d feature.
image_features = slim.max_pool2d(
image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(image_features)
feature_map = image_features
if replace_pool_with_conv:
with slim.arg_scope([slim.conv2d], padding='SAME', stride=2):
for i in range(num_layers - 1):
feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i,
base_feature_map_depth)
feature_map = slim.conv2d(
feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(feature_map)
else:
with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2):
for i in range(num_layers - 1):
feature_map_key = 'MaxPool2d_%d_2x2' % i
feature_map = slim.max_pool2d(
feature_map, [2, 2], padding='SAME', scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
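def _pooling_pyramid_usage_example():
  """A minimal usage sketch for pooling_pyramid_feature_maps (TF1 graph mode).

  The dictionary key and tensor shape are illustrative assumptions; the dict
  must contain exactly one feature map.
  """
  features = {'image_features': tf.zeros([2, 19, 19, 512])}
  # Returns the 1x1-projected base map followed by num_layers - 1 stride-2
  # max-pooled maps.
  return pooling_pyramid_feature_maps(
      base_feature_map_depth=1024, num_layers=6, image_features=features)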
| 35,694 | 42.162031 | 80 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 FPN feature extractors."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDResnet50V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet50v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=False):
is_training = True
return (
ssd_resnet_v1_fpn_feature_extractor.SSDResnet50V1FpnFeatureExtractor(
is_training, depth_multiplier, min_depth, pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _resnet_scope_name(self):
return 'resnet_v1_50'
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDResnet101V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet101v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=False):
is_training = True
return (
ssd_resnet_v1_fpn_feature_extractor.SSDResnet101V1FpnFeatureExtractor(
is_training, depth_multiplier, min_depth, pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _resnet_scope_name(self):
return 'resnet_v1_101'
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDResnet152V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet152v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=False):
is_training = True
return (
ssd_resnet_v1_fpn_feature_extractor.SSDResnet152V1FpnFeatureExtractor(
is_training, depth_multiplier, min_depth, pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _resnet_scope_name(self):
return 'resnet_v1_152'
if __name__ == '__main__':
tf.test.main()
| 3,411 | 38.674419 | 80 | py |
models | models-master/research/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet V2[1] + FPN[2] feature extractor for CenterNet[3] meta architecture.
[1]: https://arxiv.org/abs/1801.04381
[2]: https://arxiv.org/abs/1612.03144.
[3]: https://arxiv.org/abs/1904.07850
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.models.keras_models import mobilenet_v2 as mobilenetv2
_MOBILENET_V2_FPN_SKIP_LAYERS = [
'block_2_add', 'block_5_add', 'block_9_add', 'out_relu'
]
class CenterNetMobileNetV2FPNFeatureExtractor(
center_net_meta_arch.CenterNetFeatureExtractor):
"""The MobileNet V2 with FPN skip layers feature extractor for CenterNet."""
def __init__(self,
mobilenet_v2_net,
channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.),
bgr_ordering=False,
use_separable_conv=False,
upsampling_interpolation='nearest'):
"""Intializes the feature extractor.
Args:
mobilenet_v2_net: The underlying mobilenet_v2 network to use.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
use_separable_conv: If set to True, all convolutional layers in the FPN
network will be replaced by separable convolutions.
upsampling_interpolation: A string (one of 'nearest' or 'bilinear')
indicating which interpolation method to use for the upsampling ops in
the FPN.
"""
super(CenterNetMobileNetV2FPNFeatureExtractor, self).__init__(
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
self._base_model = mobilenet_v2_net
output = self._base_model(self._base_model.input)
# Add pyramid feature network on every layer that has stride 2.
skip_outputs = [
self._base_model.get_layer(skip_layer_name).output
for skip_layer_name in _MOBILENET_V2_FPN_SKIP_LAYERS
]
self._fpn_model = tf.keras.models.Model(
inputs=self._base_model.input, outputs=skip_outputs)
fpn_outputs = self._fpn_model(self._base_model.input)
    # Construct the top-down feature maps -- starting from the backbone output
    # (7x7x1280 for a 224x224 input), we repeatedly upsample, add a projected
    # residual (skip connection) and apply a smoothing convolution, ending at
    # 1/4 of the input resolution with 24 channels (56x56x24 in this case).
top_layer = fpn_outputs[-1]
# Use normal convolutional layer since the kernel_size is 1.
residual_op = tf.keras.layers.Conv2D(
filters=64, kernel_size=1, strides=1, padding='same')
top_down = residual_op(top_layer)
num_filters_list = [64, 32, 24]
for i, num_filters in enumerate(num_filters_list):
level_ind = len(num_filters_list) - 1 - i
# Upsample.
upsample_op = tf.keras.layers.UpSampling2D(
2, interpolation=upsampling_interpolation)
top_down = upsample_op(top_down)
# Residual (skip-connection) from bottom-up pathway.
# Use normal convolutional layer since the kernel_size is 1.
residual_op = tf.keras.layers.Conv2D(
filters=num_filters, kernel_size=1, strides=1, padding='same')
residual = residual_op(fpn_outputs[level_ind])
# Merge.
top_down = top_down + residual
next_num_filters = num_filters_list[i + 1] if i + 1 <= 2 else 24
if use_separable_conv:
conv = tf.keras.layers.SeparableConv2D(
filters=next_num_filters, kernel_size=3, strides=1, padding='same')
else:
conv = tf.keras.layers.Conv2D(
filters=next_num_filters, kernel_size=3, strides=1, padding='same')
top_down = conv(top_down)
top_down = tf.keras.layers.BatchNormalization()(top_down)
top_down = tf.keras.layers.ReLU()(top_down)
output = top_down
self._feature_extractor_model = tf.keras.models.Model(
inputs=self._base_model.input, outputs=output)
def preprocess(self, resized_inputs):
resized_inputs = super(CenterNetMobileNetV2FPNFeatureExtractor,
self).preprocess(resized_inputs)
return tf.keras.applications.mobilenet_v2.preprocess_input(resized_inputs)
def load_feature_extractor_weights(self, path):
self._base_model.load_weights(path)
@property
def classification_backbone(self):
return self._base_model
def call(self, inputs):
return [self._feature_extractor_model(inputs)]
@property
def out_stride(self):
"""The stride in the output image of the network."""
return 4
@property
def num_feature_outputs(self):
"""The number of feature outputs returned by the feature extractor."""
return 1
def mobilenet_v2_fpn(channel_means, channel_stds, bgr_ordering,
use_separable_conv=False, depth_multiplier=1.0,
upsampling_interpolation='nearest', **kwargs):
"""The MobileNetV2+FPN backbone for CenterNet."""
del kwargs
# Set to batchnorm_training to True for now.
network = mobilenetv2.mobilenet_v2(
batchnorm_training=True,
alpha=depth_multiplier,
include_top=False,
weights='imagenet' if depth_multiplier == 1.0 else None)
return CenterNetMobileNetV2FPNFeatureExtractor(
network,
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering,
use_separable_conv=use_separable_conv,
upsampling_interpolation=upsampling_interpolation)
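def _mobilenet_v2_fpn_usage_example():
  """A minimal usage sketch for the CenterNet MobileNetV2+FPN backbone.

  The input size and channel statistics are illustrative assumptions. Note
  that with the default depth_multiplier of 1.0 the backbone attempts to load
  ImageNet weights, which requires network access.
  """
  extractor = mobilenet_v2_fpn(
      channel_means=(0., 0., 0.),
      channel_stds=(1., 1., 1.),
      bgr_ordering=False)
  images = tf.zeros([1, 512, 512, 3])
  features = extractor(extractor.preprocess(images))
  # features[0] has spatial shape 128x128 (out_stride 4) and 24 channels.
  return features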
| 6,331 | 37.609756 | 81 | py |
models | models-master/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster RCNN Keras-based Resnet V1 FPN Feature Extractor."""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import resnet_v1
from object_detection.utils import ops
_RESNET_MODEL_OUTPUT_LAYERS = {
'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block6_out', 'conv5_block3_out'],
'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block23_out', 'conv5_block3_out'],
'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out',
'conv4_block36_out', 'conv5_block3_out'],
}
class _ResnetFPN(tf.keras.layers.Layer):
"""Construct Resnet FPN layer."""
def __init__(self,
backbone_classifier,
fpn_features_generator,
coarse_feature_layers,
pad_to_multiple,
fpn_min_level,
resnet_block_names,
base_fpn_max_level):
"""Constructor.
Args:
backbone_classifier: Classifier backbone. Should be one of 'resnet_v1_50',
'resnet_v1_101', 'resnet_v1_152'.
fpn_features_generator: KerasFpnTopDownFeatureMaps that accepts a
dictionary of features and returns a ordered dictionary of fpn features.
coarse_feature_layers: Coarse feature layers for fpn.
pad_to_multiple: An integer multiple to pad input image.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to Resnet v1 layers.
resnet_block_names: a list of block names of resnet.
base_fpn_max_level: maximum level of fpn without coarse feature layers.
"""
super(_ResnetFPN, self).__init__()
self.classification_backbone = backbone_classifier
self.fpn_features_generator = fpn_features_generator
self.coarse_feature_layers = coarse_feature_layers
self.pad_to_multiple = pad_to_multiple
self._fpn_min_level = fpn_min_level
self._resnet_block_names = resnet_block_names
self._base_fpn_max_level = base_fpn_max_level
def call(self, inputs):
"""Create internal Resnet FPN layer.
Args:
inputs: A [batch, height_out, width_out, channels] float32 tensor
representing a batch of images.
Returns:
feature_maps: A list of tensors with shape [batch, height, width, depth]
represent extracted features.
"""
inputs = ops.pad_to_multiple(inputs, self.pad_to_multiple)
backbone_outputs = self.classification_backbone(inputs)
feature_block_list = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_block_list.append('block{}'.format(level - 1))
feature_block_map = dict(
list(zip(self._resnet_block_names, backbone_outputs)))
fpn_input_image_features = [
(feature_block, feature_block_map[feature_block])
for feature_block in feature_block_list]
fpn_features = self.fpn_features_generator(fpn_input_image_features)
feature_maps = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_block{}'.format(level-1)])
last_feature_map = fpn_features['top_down_block{}'.format(
self._base_fpn_max_level - 1)]
for coarse_feature_layers in self.coarse_feature_layers:
for layer in coarse_feature_layers:
last_feature_map = layer(last_feature_map)
feature_maps.append(last_feature_map)
return feature_maps
class FasterRCNNResnetV1FpnKerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Faster RCNN Feature Extractor using Keras-based Resnet V1 FPN features."""
def __init__(self,
is_training,
resnet_v1_base_model,
resnet_v1_base_model_name,
first_stage_features_stride,
conv_hyperparams,
batch_norm_trainable=True,
pad_to_multiple=32,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
resnet_v1_base_model: base resnet v1 network to use. One of
the resnet_v1.resnet_v1_{50,101,152} models.
resnet_v1_base_model_name: model name under which to construct resnet v1.
first_stage_features_stride: See base class.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
batch_norm_trainable: See base class.
pad_to_multiple: An integer multiple to pad input image.
weight_decay: See base class.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to Resnet v1 layers.
fpn_max_level: the smallest resolution feature map to construct or use in
        FPN. FPN construction uses feature maps starting from fpn_min_level
        up to the fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
super(FasterRCNNResnetV1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
self._resnet_v1_base_model = resnet_v1_base_model
self._resnet_v1_base_model_name = resnet_v1_base_model_name
self._conv_hyperparams = conv_hyperparams
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._freeze_batchnorm = (not batch_norm_trainable)
self._pad_to_multiple = pad_to_multiple
self._override_base_feature_extractor_hyperparams = \
override_base_feature_extractor_hyperparams
self._resnet_block_names = ['block1', 'block2', 'block3', 'block4']
self.classification_backbone = None
self._fpn_features_generator = None
self._coarse_feature_layers = []
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def get_proposal_feature_extractor_model(self, name=None):
"""Returns a model that extracts first stage RPN features.
Extracts features using the Resnet v1 FPN network.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes preprocessed_inputs:
A [batch, height, width, channels] float32 tensor
representing a batch of images.
And returns rpn_feature_map:
A list of tensors with shape [batch, height, width, depth]
"""
with tf.name_scope(name):
with tf.name_scope('ResnetV1FPN'):
full_resnet_v1_model = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=(self._conv_hyperparams if
self._override_base_feature_extractor_hyperparams
else None),
classes=None,
weights=None,
include_top=False)
output_layers = _RESNET_MODEL_OUTPUT_LAYERS[
self._resnet_v1_base_model_name]
outputs = [full_resnet_v1_model.get_layer(output_layer_name).output
for output_layer_name in output_layers]
self.classification_backbone = tf.keras.Model(
inputs=full_resnet_v1_model.inputs,
outputs=outputs)
self._base_fpn_max_level = min(self._fpn_max_level, 5)
self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
self._fpn_features_generator = (
feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=self._num_levels,
depth=self._additional_layer_depth,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
# Construct coarse feature layers
for i in range(self._base_fpn_max_level, self._fpn_max_level):
layers = []
layer_name = 'bottom_up_block{}'.format(i)
layers.append(
tf.keras.layers.Conv2D(
self._additional_layer_depth,
[3, 3],
padding='SAME',
strides=2,
name=layer_name + '_conv',
**self._conv_hyperparams.params()))
layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name=layer_name + '_batchnorm'))
layers.append(
self._conv_hyperparams.build_activation_layer(
name=layer_name))
self._coarse_feature_layers.append(layers)
feature_extractor_model = _ResnetFPN(self.classification_backbone,
self._fpn_features_generator,
self._coarse_feature_layers,
self._pad_to_multiple,
self._fpn_min_level,
self._resnet_block_names,
self._base_fpn_max_level)
return feature_extractor_model
def get_box_classifier_feature_extractor_model(self, name=None):
"""Returns a model that extracts second stage box classifier features.
Construct two fully connected layer to extract the box classifier features.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes proposal_feature_maps:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
And returns proposal_classifier_features:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, 1, 1, 1024]
representing box classifier features for each proposal.
"""
with tf.name_scope(name):
with tf.name_scope('ResnetV1FPN'):
feature_extractor_model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=1024, activation='relu'),
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm)),
tf.keras.layers.Dense(units=1024, activation='relu'),
tf.keras.layers.Reshape((1, 1, 1024))
])
return feature_extractor_model
class FasterRCNNResnet50FpnKerasFeatureExtractor(
FasterRCNNResnetV1FpnKerasFeatureExtractor):
"""Faster RCNN with Resnet50 FPN feature extractor."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=True,
conv_hyperparams=None,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
conv_hyperparams: See base class.
weight_decay: See base class.
fpn_min_level: See base class.
fpn_max_level: See base class.
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet50FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparams,
resnet_v1_base_model=resnet_v1.resnet_v1_50,
resnet_v1_base_model_name='resnet_v1_50',
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay,
fpn_min_level=fpn_min_level,
fpn_max_level=fpn_max_level,
additional_layer_depth=additional_layer_depth,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams
)
class FasterRCNNResnet101FpnKerasFeatureExtractor(
FasterRCNNResnetV1FpnKerasFeatureExtractor):
"""Faster RCNN with Resnet101 FPN feature extractor."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=True,
conv_hyperparams=None,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
conv_hyperparams: See base class.
weight_decay: See base class.
fpn_min_level: See base class.
fpn_max_level: See base class.
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet101FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparams,
resnet_v1_base_model=resnet_v1.resnet_v1_101,
resnet_v1_base_model_name='resnet_v1_101',
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay,
fpn_min_level=fpn_min_level,
fpn_max_level=fpn_max_level,
additional_layer_depth=additional_layer_depth,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
class FasterRCNNResnet152FpnKerasFeatureExtractor(
FasterRCNNResnetV1FpnKerasFeatureExtractor):
"""Faster RCNN with Resnet152 FPN feature extractor."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=True,
conv_hyperparams=None,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
conv_hyperparams: See base class.
weight_decay: See base class.
fpn_min_level: See base class.
fpn_max_level: See base class.
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet152FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparams,
resnet_v1_base_model=resnet_v1.resnet_v1_152,
resnet_v1_base_model_name='resnet_v1_152',
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay,
fpn_min_level=fpn_min_level,
fpn_max_level=fpn_max_level,
additional_layer_depth=additional_layer_depth,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
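def _faster_rcnn_resnet50_fpn_usage_example():
  """A minimal construction sketch for the Resnet50 FPN feature extractor.

  The conv_hyperparams text proto and the input size are illustrative
  assumptions; in a real pipeline they come from the model config.
  """
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  from object_detection.protos import hyperparams_pb2
  hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(
      """
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { truncated_normal_initializer { stddev: 0.03 } }
      activation: RELU_6
      batch_norm { scale: true }
      """, hyperparams_proto)
  extractor = FasterRCNNResnet50FpnKerasFeatureExtractor(
      is_training=False,
      first_stage_features_stride=16,
      conv_hyperparams=hyperparams_builder.KerasLayerHyperparams(
          hyperparams_proto))
  rpn_model = extractor.get_proposal_feature_extractor_model(name='FPN')
  images = tf.zeros([1, 640, 640, 3])
  # Returns five feature maps at strides 4, 8, 16, 32 and 64.
  return rpn_model(extractor.preprocess(images))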
| 17,933 | 40.227586 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_fpn_keras_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Keras-based MobilenetV2 FPN Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import mobilenet_v2
from object_detection.models.keras_models import model_utils
from object_detection.utils import ops
from object_detection.utils import shape_utils
# Total number of blocks in Mobilenet_V2 base network.
NUM_LAYERS = 19
# A modified config of mobilenet v2 that makes it more detection friendly.
def _create_modified_mobilenet_config():
last_conv = model_utils.ConvDefs(conv_name='Conv_1', filters=256)
return [last_conv]
class SSDMobileNetV2FpnKerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based MobilenetV2 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False,
name=None):
"""SSD Keras based FPN feature extractor Mobilenet v2 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to MobileNet v2 layers
{layer_4, layer_7, layer_14, layer_19}, respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
        FPN. FPN construction uses feature maps starting from fpn_min_level
        up to the fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
      use_native_resize_op: Whether to use the tf.image.resize_nearest_neighbor
        op to do upsampling in FPN. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDMobileNetV2FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._conv_defs = None
if self._use_depthwise:
self._conv_defs = _create_modified_mobilenet_config()
self._use_native_resize_op = use_native_resize_op
self._feature_blocks = ['layer_4', 'layer_7', 'layer_14', 'layer_19']
self.classification_backbone = None
self._fpn_features_generator = None
self._coarse_feature_layers = []
def build(self, input_shape):
full_mobilenet_v2 = mobilenet_v2.mobilenet_v2(
batchnorm_training=(self._is_training and not self._freeze_batchnorm),
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
weights=None,
use_explicit_padding=self._use_explicit_padding,
alpha=self._depth_multiplier,
min_depth=self._min_depth,
include_top=False,
input_shape=(None, None, input_shape[-1]))
layer_names = [layer.name for layer in full_mobilenet_v2.layers]
outputs = []
for layer_idx in [4, 7, 14]:
add_name = 'block_{}_add'.format(layer_idx - 2)
project_name = 'block_{}_project_BN'.format(layer_idx - 2)
output_layer_name = add_name if add_name in layer_names else project_name
outputs.append(full_mobilenet_v2.get_layer(output_layer_name).output)
layer_19 = full_mobilenet_v2.get_layer(name='out_relu').output
outputs.append(layer_19)
self.classification_backbone = tf.keras.Model(
inputs=full_mobilenet_v2.inputs,
outputs=outputs)
# pylint:disable=g-long-lambda
self._depth_fn = lambda d: max(
int(d * self._depth_multiplier), self._min_depth)
self._base_fpn_max_level = min(self._fpn_max_level, 5)
self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
self._fpn_features_generator = (
feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=self._num_levels,
depth=self._depth_fn(self._additional_layer_depth),
use_depthwise=self._use_depthwise,
use_explicit_padding=self._use_explicit_padding,
use_native_resize_op=self._use_native_resize_op,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
# Construct coarse feature layers
padding = 'VALID' if self._use_explicit_padding else 'SAME'
kernel_size = 3
stride = 2
for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1):
coarse_feature_layers = []
if self._use_explicit_padding:
def fixed_padding(features, kernel_size=kernel_size):
return ops.fixed_padding(features, kernel_size)
coarse_feature_layers.append(tf.keras.layers.Lambda(
fixed_padding, name='fixed_padding'))
layer_name = 'bottom_up_Conv2d_{}'.format(
i - self._base_fpn_max_level + NUM_LAYERS)
conv_block = feature_map_generators.create_conv_block(
self._use_depthwise, kernel_size, padding, stride, layer_name,
self._conv_hyperparams, self._is_training, self._freeze_batchnorm,
self._depth_fn(self._additional_layer_depth))
coarse_feature_layers.extend(conv_block)
self._coarse_feature_layers.append(coarse_feature_layers)
self.built = True
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
image_features = self.classification_backbone(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
feature_block_list = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_block_list.append(self._feature_blocks[level - 2])
feature_start_index = len(self._feature_blocks) - self._num_levels
fpn_input_image_features = [
(key, image_features[feature_start_index + index])
for index, key in enumerate(feature_block_list)]
fpn_features = self._fpn_features_generator(fpn_input_image_features)
feature_maps = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_{}'.format(
self._feature_blocks[level - 2])])
last_feature_map = fpn_features['top_down_{}'.format(
self._feature_blocks[self._base_fpn_max_level - 2])]
for coarse_feature_layers in self._coarse_feature_layers:
for layer in coarse_feature_layers:
last_feature_map = layer(last_feature_map)
feature_maps.append(last_feature_map)
return feature_maps
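def _ssd_mobilenet_v2_fpn_usage_example():
  """A minimal usage sketch for SSDMobileNetV2FpnKerasFeatureExtractor.

  The conv_hyperparams text proto and the input size are illustrative
  assumptions; in practice they are built from the SSD model config.
  """
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  from object_detection.protos import hyperparams_pb2
  hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(
      """
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { truncated_normal_initializer { stddev: 0.03 } }
      activation: RELU_6
      batch_norm { scale: true }
      """, hyperparams_proto)
  extractor = SSDMobileNetV2FpnKerasFeatureExtractor(
      is_training=False,
      depth_multiplier=1.0,
      min_depth=16,
      pad_to_multiple=1,
      conv_hyperparams=hyperparams_builder.KerasLayerHyperparams(
          hyperparams_proto),
      freeze_batchnorm=True,
      inplace_batchnorm_update=False,
      name='MobilenetV2_FPN')
  images = tf.zeros([2, 320, 320, 3])
  # For a 320x320 input and the default FPN levels 3-7, this yields five maps
  # with spatial sizes 40, 20, 10, 5 and 3, each 256 channels deep.
  return extractor(extractor.preprocess(images))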
| 10,807 | 43.295082 | 80 | py |
models | models-master/research/object_detection/models/center_net_hourglass_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hourglass[1] feature extractor for CenterNet[2] meta architecture.
[1]: https://arxiv.org/abs/1603.06937
[2]: https://arxiv.org/abs/1904.07850
"""
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.models.keras_models import hourglass_network
class CenterNetHourglassFeatureExtractor(
center_net_meta_arch.CenterNetFeatureExtractor):
"""The hourglass feature extractor for CenterNet.
  This class is a thin wrapper around the hourglass network, combined with the
  preprocessing methods inherited from the base feature extractor class.
"""
def __init__(self, hourglass_net, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Intializes the feature extractor.
Args:
hourglass_net: The underlying hourglass network to use.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
"""
super(CenterNetHourglassFeatureExtractor, self).__init__(
channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
self._network = hourglass_net
def call(self, inputs):
return self._network(inputs)
@property
def out_stride(self):
"""The stride in the output image of the network."""
return 4
@property
def num_feature_outputs(self):
"""Ther number of feature outputs returned by the feature extractor."""
return self._network.num_hourglasses
def hourglass_10(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The Hourglass-10 backbone for CenterNet."""
del kwargs
network = hourglass_network.hourglass_10(num_channels=32)
return CenterNetHourglassFeatureExtractor(
network, channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def hourglass_20(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The Hourglass-20 backbone for CenterNet."""
del kwargs
network = hourglass_network.hourglass_20(num_channels=48)
return CenterNetHourglassFeatureExtractor(
network, channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def hourglass_32(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The Hourglass-32 backbone for CenterNet."""
del kwargs
network = hourglass_network.hourglass_32(num_channels=48)
return CenterNetHourglassFeatureExtractor(
network, channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def hourglass_52(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The Hourglass-52 backbone for CenterNet."""
del kwargs
network = hourglass_network.hourglass_52(num_channels=64)
return CenterNetHourglassFeatureExtractor(
network, channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def hourglass_104(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The Hourglass-104 backbone for CenterNet."""
del kwargs
# TODO(vighneshb): update hourglass_104 signature to match with other
# hourglass networks.
network = hourglass_network.hourglass_104()
return CenterNetHourglassFeatureExtractor(
network, channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
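def _hourglass_usage_example():
  """A minimal usage sketch; the backbone choice, input size and channel
  statistics are illustrative assumptions."""
  import tensorflow.compat.v1 as tf
  extractor = hourglass_10(
      channel_means=(0., 0., 0.),
      channel_stds=(1., 1., 1.),
      bgr_ordering=False)
  images = tf.zeros([1, 256, 256, 3])
  # Returns one feature map per hourglass stack, each at 1/4 the input
  # resolution (out_stride 4).
  return extractor(extractor.preprocess(images))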
| 4,250 | 35.646552 | 80 | py |
models | models-master/research/object_detection/models/center_net_resnet_feature_extractor.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnetv2 based feature extractors for CenterNet[1] meta architecture.
[1]: https://arxiv.org/abs/1904.07850
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor
class CenterNetResnetFeatureExtractor(CenterNetFeatureExtractor):
"""Resnet v2 base feature extractor for the CenterNet model."""
def __init__(self, resnet_type, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Initializes the feature extractor with a specific ResNet architecture.
Args:
resnet_type: A string specifying which kind of ResNet to use. Currently
only `resnet_v2_50` and `resnet_v2_101` are supported.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
"""
super(CenterNetResnetFeatureExtractor, self).__init__(
channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
if resnet_type == 'resnet_v2_101':
self._base_model = tf.keras.applications.ResNet101V2(weights=None,
include_top=False)
output_layer = 'conv5_block3_out'
elif resnet_type == 'resnet_v2_50':
self._base_model = tf.keras.applications.ResNet50V2(weights=None,
include_top=False)
output_layer = 'conv5_block3_out'
else:
raise ValueError('Unknown Resnet Model {}'.format(resnet_type))
output_layer = self._base_model.get_layer(output_layer)
self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input,
outputs=output_layer.output)
resnet_output = self._resnet_model(self._base_model.input)
for num_filters in [256, 128, 64]:
# TODO(vighneshb) This section has a few differences from the paper
# Figure out how much of a performance impact they have.
# 1. We use a simple convolution instead of a deformable convolution
conv = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=3,
strides=1, padding='same')
resnet_output = conv(resnet_output)
resnet_output = tf.keras.layers.BatchNormalization()(resnet_output)
resnet_output = tf.keras.layers.ReLU()(resnet_output)
# 2. We use the default initialization for the convolution layers
# instead of initializing it to do bilinear upsampling.
conv_transpose = tf.keras.layers.Conv2DTranspose(filters=num_filters,
kernel_size=3, strides=2,
padding='same')
resnet_output = conv_transpose(resnet_output)
resnet_output = tf.keras.layers.BatchNormalization()(resnet_output)
resnet_output = tf.keras.layers.ReLU()(resnet_output)
self._feature_extractor_model = tf.keras.models.Model(
inputs=self._base_model.input, outputs=resnet_output)
def preprocess(self, resized_inputs):
"""Preprocess input images for the ResNet model.
This scales images in the range [0, 255] to the range [-1, 1]
Args:
resized_inputs: a [batch, height, width, channels] float32 tensor.
Returns:
outputs: a [batch, height, width, channels] float32 tensor.
"""
resized_inputs = super(CenterNetResnetFeatureExtractor, self).preprocess(
resized_inputs)
return tf.keras.applications.resnet_v2.preprocess_input(resized_inputs)
def load_feature_extractor_weights(self, path):
self._base_model.load_weights(path)
def call(self, inputs):
"""Returns image features extracted by the backbone.
Args:
inputs: An image tensor of shape [batch_size, input_height,
input_width, 3]
Returns:
features_list: A list of length 1 containing a tensor of shape
[batch_size, input_height // 4, input_width // 4, 64] containing
the features extracted by the ResNet.
"""
return [self._feature_extractor_model(inputs)]
@property
def num_feature_outputs(self):
return 1
@property
def out_stride(self):
return 4
@property
def classification_backbone(self):
return self._base_model
def resnet_v2_101(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v2 101 feature extractor."""
del kwargs
return CenterNetResnetFeatureExtractor(
resnet_type='resnet_v2_101',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering
)
def resnet_v2_50(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v2 50 feature extractor."""
del kwargs
return CenterNetResnetFeatureExtractor(
resnet_type='resnet_v2_50',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
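def _resnet_v2_50_usage_example():
  """A minimal usage sketch; the input size is an illustrative assumption.

  Constructing the extractor builds a randomly initialized ResNet50-v2
  (weights=None); pretrained weights can be loaded separately with
  load_feature_extractor_weights.
  """
  extractor = resnet_v2_50(
      channel_means=(0., 0., 0.),
      channel_stds=(1., 1., 1.),
      bgr_ordering=False)
  images = tf.zeros([1, 512, 512, 3])
  # Returns a single feature map of shape [1, 128, 128, 64] (out_stride 4).
  return extractor(extractor.preprocess(images))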
| 5,892 | 37.266234 | 94 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_fpn_feature_extractor.
Using the parameterized test decorator, this test covers the Keras-based
Mobilenet V2 FPN feature extractor in SSD with and without depthwise
convolutions.
"""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_fpn_keras_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
@parameterized.parameters(
{
'use_depthwise': False,
},
{
'use_depthwise': True,
},
)
class SsdMobilenetV2FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
is_training=True,
use_explicit_padding=False,
use_keras=False,
use_depthwise=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
use_depthwise: Whether to use depthwise convolutions.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v2_fpn_keras_feature_extractor
.SSDMobileNetV2FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
name='MobilenetV2_FPN'))
def test_extract_features_returns_correct_shapes_256(self,
use_depthwise):
use_keras = True
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
  def test_extract_features_returns_correct_shapes_320(self,
use_depthwise):
use_keras = True
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_4_channels(self,
use_depthwise):
use_keras = True
image_height = 320
image_width = 320
num_channels = 4
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise,
num_channels=num_channels)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise,
num_channels=num_channels)
def test_extract_features_with_dynamic_image_shape(self,
use_depthwise):
use_keras = True
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self, use_depthwise):
use_keras = True
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
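    # 299 is not a multiple of 32, so inputs are zero-padded up to 320x320
    # before extraction, giving the same feature map sizes as a 320 input.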
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self, use_depthwise):
use_keras = True
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
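    # A depth multiplier of 0.5**12 would shrink every layer below 32 channels,
    # so min_depth (32) clamps the width of all returned feature maps.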
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_raises_error_with_invalid_image_size(
self, use_depthwise=False):
use_keras = True
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_preprocess_returns_correct_value_range(self,
use_depthwise):
use_keras = True
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
if __name__ == '__main__':
tf.test.main()
| 10,575 | 33.789474 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v1_fpn_feature_extractor.
By using the parameterized test decorator, this test serves both the Slim-based
and the Keras-based Mobilenet V1 FPN feature extractors in SSD.
"""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_fpn_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV1FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True, use_explicit_padding=False,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
      use_keras: if True, builds a Keras-based feature extractor; if False,
        builds a Slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v1_fpn_feature_extractor.
SSDMobileNetV1FpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_depthwise=True,
use_explicit_padding=use_explicit_padding))
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
  def test_extract_features_returns_correct_shapes_320(self):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=False)
def test_preprocess_returns_correct_value_range(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=False)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV1'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=False)
def test_variable_count(self):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=False)
self.assertEqual(len(variables), 153)
def test_fused_batchnorm(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=False)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
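    # Slim batch norm layers are expected to lower to fused kernels, so at
    # least one FusedBatchNorm op should appear in the built graph.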
self.assertTrue(
any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
if __name__ == '__main__':
tf.test.main()
| 8,728 | 41.169082 | 80 | py |
models | models-master/research/object_detection/models/ssd_efficientnet_bifpn_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Keras-based EfficientNet + BiFPN (EfficientDet) Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from keras import backend as keras_backend
from six.moves import range
from six.moves import zip
import tensorflow.compat.v2 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import bidirectional_feature_pyramid_generators as bifpn_generators
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
try:
from official.legacy.image_classification.efficientnet import efficientnet_model
except ModuleNotFoundError:
from official.vision.image_classification.efficientnet import efficientnet_model
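# Maps feature pyramid levels to the EfficientNet endpoint layers used as BiFPN
# inputs; level i roughly corresponds to a feature map with spatial stride 2**i
# relative to the input image.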
_EFFICIENTNET_LEVEL_ENDPOINTS = {
1: 'stack_0/block_0/project_bn',
2: 'stack_1/block_1/add',
3: 'stack_2/block_1/add',
4: 'stack_4/block_2/add',
5: 'stack_6/block_0/project_bn',
}
class SSDEfficientNetBiFPNKerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Keras-based EfficientNetBiFPN (EfficientDet) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level,
bifpn_max_level,
bifpn_num_iterations,
bifpn_num_filters,
bifpn_combine_method,
efficientnet_version,
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name=None):
"""SSD Keras-based EfficientNetBiFPN (EfficientDet) feature extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which map to the EfficientNet backbone
        endpoints used as BiFPN inputs (see _EFFICIENTNET_LEVEL_ENDPOINTS).
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level
        up to bifpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of BiFPN
        levels.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
      bifpn_combine_method: the method used to combine BiFPN nodes.
      efficientnet_version: the EfficientNet version to use for this feature
        extractor's backbone.
      use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
        explicit padding when extracting features.
      use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
        convolutions when inputs to a node have a differing number of channels,
        and uses separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: Whether to override the
efficientnet backbone's default weight decay with the weight decay
defined by `conv_hyperparams`. Note, only overriding of weight decay is
currently supported.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetBiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
if depth_multiplier != 1.0:
raise ValueError('EfficientNetBiFPN does not support a non-default '
'depth_multiplier.')
if use_explicit_padding:
raise ValueError('EfficientNetBiFPN does not support explicit padding.')
if use_depthwise:
raise ValueError('EfficientNetBiFPN does not support use_depthwise.')
self._bifpn_min_level = bifpn_min_level
self._bifpn_max_level = bifpn_max_level
self._bifpn_num_iterations = bifpn_num_iterations
self._bifpn_num_filters = max(bifpn_num_filters, min_depth)
self._bifpn_node_params = {'combine_method': bifpn_combine_method}
self._efficientnet_version = efficientnet_version
self._use_native_resize_op = use_native_resize_op
logging.info('EfficientDet EfficientNet backbone version: %s',
self._efficientnet_version)
logging.info('EfficientDet BiFPN num filters: %d', self._bifpn_num_filters)
logging.info('EfficientDet BiFPN num iterations: %d',
self._bifpn_num_iterations)
self._backbone_max_level = min(
max(_EFFICIENTNET_LEVEL_ENDPOINTS.keys()), bifpn_max_level)
self._output_layer_names = [
_EFFICIENTNET_LEVEL_ENDPOINTS[i]
for i in range(bifpn_min_level, self._backbone_max_level + 1)]
self._output_layer_alias = [
'level_{}'.format(i)
for i in range(bifpn_min_level, self._backbone_max_level + 1)]
# Initialize the EfficientNet backbone.
# Note, this is currently done in the init method rather than in the build
# method, since doing so introduces an error which is not well understood.
efficientnet_overrides = {'rescale_input': False}
if override_base_feature_extractor_hyperparams:
efficientnet_overrides[
'weight_decay'] = conv_hyperparams.get_regularizer_weight()
if (conv_hyperparams.use_sync_batch_norm() and
keras_backend.is_tpu_strategy(tf.distribute.get_strategy())):
efficientnet_overrides['batch_norm'] = 'tpu'
efficientnet_base = efficientnet_model.EfficientNet.from_name(
model_name=self._efficientnet_version, overrides=efficientnet_overrides)
outputs = [efficientnet_base.get_layer(output_layer_name).output
for output_layer_name in self._output_layer_names]
self._efficientnet = tf.keras.Model(
inputs=efficientnet_base.inputs, outputs=outputs)
self.classification_backbone = efficientnet_base
self._bifpn_stage = None
def build(self, input_shape):
self._bifpn_stage = bifpn_generators.KerasBiFpnFeatureMaps(
bifpn_num_iterations=self._bifpn_num_iterations,
bifpn_num_filters=self._bifpn_num_filters,
fpn_min_level=self._bifpn_min_level,
fpn_max_level=self._bifpn_max_level,
input_max_level=self._backbone_max_level,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
bifpn_node_params=self._bifpn_node_params,
use_native_resize_op=self._use_native_resize_op,
name='bifpn')
self.built = True
def preprocess(self, inputs):
"""SSD preprocessing.
Channel-wise mean subtraction and scaling.
Args:
inputs: a [batch, height, width, channels] float tensor representing a
batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
if inputs.shape.as_list()[3] == 3:
# Input images are expected to be in the range [0, 255].
channel_offset = [0.485, 0.456, 0.406]
channel_scale = [0.229, 0.224, 0.225]
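      # These are the standard ImageNet RGB mean and standard deviation values
      # used to normalize inputs for the pretrained EfficientNet backbone.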
return ((inputs / 255.0) - [[channel_offset]]) / [[channel_scale]]
else:
return inputs
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
129, preprocessed_inputs)
base_feature_maps = self._efficientnet(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
output_feature_map_dict = self._bifpn_stage(
list(zip(self._output_layer_alias, base_feature_maps)))
return list(output_feature_map_dict.values())
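# The EfficientDet-D0..D7 subclasses below differ only in the EfficientNet
# backbone version and in the default BiFPN depth/width/combine-method they
# pass to the base class. A rough construction sketch (not part of this
# module; it assumes `hyperparams` is a hyperparams_builder.KerasLayerHyperparams
# built elsewhere, e.g. from a pipeline config):
#
#   extractor = SSDEfficientNetB0BiFPNKerasFeatureExtractor(
#       is_training=True, depth_multiplier=1.0, min_depth=16,
#       pad_to_multiple=32, conv_hyperparams=hyperparams,
#       freeze_batchnorm=False, inplace_batchnorm_update=False)
#   images = extractor.preprocess(tf.zeros([1, 512, 512, 3]))
#   feature_maps = extractor(images)  # expected: 5 maps for levels 3-7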
class SSDEfficientNetB0BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b0 BiFPN (EfficientDet-d0) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=3,
bifpn_num_filters=64,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D0'):
"""SSD Keras EfficientNet-b0 BiFPN (EfficientDet-d0) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which map to the EfficientNet backbone
        endpoints used as BiFPN inputs.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level
        up to bifpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of BiFPN
        levels.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
      bifpn_combine_method: the method used to combine BiFPN nodes.
      use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
        explicit padding when extracting features.
      use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
        convolutions when inputs to a node have a differing number of channels,
        and uses separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB0BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b0',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB1BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b1 BiFPN (EfficientDet-d1) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=4,
bifpn_num_filters=88,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D1'):
"""SSD Keras EfficientNet-b1 BiFPN (EfficientDet-d1) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which map to the EfficientNet backbone
        endpoints used as BiFPN inputs.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level
        up to bifpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of BiFPN
        levels.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
      bifpn_combine_method: the method used to combine BiFPN nodes.
      use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
        explicit padding when extracting features.
      use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
        convolutions when inputs to a node have a differing number of channels,
        and uses separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB1BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b1',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB2BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b2 BiFPN (EfficientDet-d2) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=5,
bifpn_num_filters=112,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D2'):
"""SSD Keras EfficientNet-b2 BiFPN (EfficientDet-d2) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which map to the EfficientNet backbone
        endpoints used as BiFPN inputs.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level
        up to bifpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of BiFPN
        levels.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
      bifpn_combine_method: the method used to combine BiFPN nodes.
      use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
        explicit padding when extracting features.
      use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
        convolutions when inputs to a node have a differing number of channels,
        and uses separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB2BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b2',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB3BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b3 BiFPN (EfficientDet-d3) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=6,
bifpn_num_filters=160,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D3'):
"""SSD Keras EfficientNet-b3 BiFPN (EfficientDet-d3) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which map to the EfficientNet backbone
        endpoints used as BiFPN inputs.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level
        up to bifpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of BiFPN
        levels.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
      bifpn_combine_method: the method used to combine BiFPN nodes.
      use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
        explicit padding when extracting features.
      use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
        convolutions when inputs to a node have a differing number of channels,
        and uses separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB3BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b3',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB4BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b4 BiFPN (EfficientDet-d4) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=7,
bifpn_num_filters=224,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D4'):
"""SSD Keras EfficientNet-b4 BiFPN (EfficientDet-d4) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which map to the EfficientNet backbone
        endpoints used as BiFPN inputs.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level
        up to bifpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of BiFPN
        levels.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
      bifpn_combine_method: the method used to combine BiFPN nodes.
      use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
        explicit padding when extracting features.
      use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
        convolutions when inputs to a node have a differing number of channels,
        and uses separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB4BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b4',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB5BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b5 BiFPN (EfficientDet-d5) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=7,
bifpn_num_filters=288,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D5'):
"""SSD Keras EfficientNet-b5 BiFPN (EfficientDet-d5) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which map to the EfficientNet backbone
        endpoints used as BiFPN inputs.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level
        up to bifpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of BiFPN
        levels.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
      bifpn_combine_method: the method used to combine BiFPN nodes.
      use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
        explicit padding when extracting features.
      use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
        convolutions when inputs to a node have a differing number of channels,
        and uses separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB5BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b5',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB6BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b6 BiFPN (EfficientDet-d[6,7]) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=8,
bifpn_num_filters=384,
bifpn_combine_method='sum',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D6-D7'):
"""SSD Keras EfficientNet-b6 BiFPN (EfficientDet-d[6,7]) Feature Extractor.
SSD Keras EfficientNet-b6 BiFPN Feature Extractor, a.k.a. EfficientDet-d6
and EfficientDet-d7. The EfficientDet-d[6,7] models use the same backbone
EfficientNet-b6 and the same BiFPN architecture, and therefore have the same
number of parameters. They only differ in their input resolutions.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which map to the EfficientNet backbone
        endpoints used as BiFPN inputs.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level
        up to bifpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of BiFPN
        levels.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
      bifpn_combine_method: the method used to combine BiFPN nodes.
      use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
        explicit padding when extracting features.
      use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
        convolutions when inputs to a node have a differing number of channels,
        and uses separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB6BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b6',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB7BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b7 BiFPN Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=8,
bifpn_num_filters=384,
bifpn_combine_method='sum',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientNet-B7_BiFPN'):
"""SSD Keras EfficientNet-b7 BiFPN Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which map to the EfficientNet backbone
        endpoints used as BiFPN inputs.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level
        up to bifpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of BiFPN
        levels.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
      bifpn_combine_method: the method used to combine BiFPN nodes.
      use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
        explicit padding when extracting features.
      use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
        convolutions when inputs to a node have a differing number of channels,
        and uses separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB7BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b7',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
| 47,228 | 47.589506 | 96 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf2_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 FPN feature extractors."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase
from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SSDResnet50V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet50v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=True):
is_training = True
return (ssd_resnet_v1_fpn_keras_feature_extractor.
SSDResNet50V1FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
name='ResNet50V1_FPN'))
def _resnet_scope_name(self):
return 'ResNet50V1_FPN'
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SSDResnet101V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet101v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=False):
is_training = True
return (ssd_resnet_v1_fpn_keras_feature_extractor.
SSDResNet101V1FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
name='ResNet101V1_FPN'))
def _resnet_scope_name(self):
return 'ResNet101V1_FPN'
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SSDResnet152V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet152v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=False):
is_training = True
return (ssd_resnet_v1_fpn_keras_feature_extractor.
SSDResNet152V1FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
name='ResNet152V1_FPN'))
def _resnet_scope_name(self):
return 'ResNet152V1_FPN'
if __name__ == '__main__':
tf.test.main()
| 4,210 | 39.490385 | 80 | py |
models | models-master/research/object_detection/models/feature_map_generators_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature map generators."""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import feature_map_generators
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
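# Each layout below describes one multi-resolution feature map configuration:
# non-empty 'from_layer' entries name backbone endpoints to reuse directly
# (with a 'layer_depth' of -1), while empty strings request new convolutional
# feature maps of the given 'layer_depth' stacked on top of the last endpoint.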
INCEPTION_V2_LAYOUT = {
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 256],
'anchor_strides': [16, 32, 64, -1, -1, -1],
'layer_target_norm': [20.0, -1, -1, -1, -1, -1],
}
INCEPTION_V3_LAYOUT = {
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128],
'anchor_strides': [16, 32, 64, -1, -1, -1],
'aspect_ratios': [1.0, 2.0, 1.0/2, 3.0, 1.0/3]
}
EMBEDDED_SSD_MOBILENET_V1_LAYOUT = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''],
'layer_depth': [-1, -1, 512, 256, 256],
'conv_kernel_size': [-1, -1, 3, 3, 2],
}
SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT = {
'from_layer': ['Conv2d_13_pointwise', '', '', ''],
'layer_depth': [-1, 256, 256, 256],
}
class MultiResolutionFeatureMapGeneratorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _build_feature_map_generator(self, feature_map_layout,
pool_residual=False):
if tf_version.is_tf2():
return feature_map_generators.KerasMultiResolutionFeatureMaps(
feature_map_layout=feature_map_layout,
depth_multiplier=1,
min_depth=32,
insert_1x1_conv=True,
freeze_batchnorm=False,
is_training=True,
conv_hyperparams=self._build_conv_hyperparams(),
name='FeatureMaps'
)
else:
def feature_map_generator(image_features):
return feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=1,
min_depth=32,
insert_1x1_conv=True,
image_features=image_features,
pool_residual=pool_residual)
return feature_map_generator
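  # Both branches above return a callable with the same interface, e.g.
  #   feature_maps = feature_map_generator(image_features)
  # so the tests below exercise the Keras (TF2) and slim (TF1) code paths
  # uniformly.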
def test_get_expected_feature_map_shapes_with_inception_v2(self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=INCEPTION_V2_LAYOUT)
def graph_fn():
feature_maps = feature_map_generator(image_features)
return feature_maps
expected_feature_map_shapes = {
'Mixed_3c': (4, 28, 28, 256),
'Mixed_4c': (4, 14, 14, 576),
'Mixed_5c': (4, 7, 7, 1024),
'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_inception_v2_use_depthwise(
self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
layout_copy = INCEPTION_V2_LAYOUT.copy()
layout_copy['use_depthwise'] = True
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=layout_copy)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'Mixed_3c': (4, 28, 28, 256),
'Mixed_4c': (4, 14, 14, 576),
'Mixed_5c': (4, 7, 7, 1024),
'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_use_explicit_padding(self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
layout_copy = INCEPTION_V2_LAYOUT.copy()
layout_copy['use_explicit_padding'] = True
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=layout_copy,
)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'Mixed_3c': (4, 28, 28, 256),
'Mixed_4c': (4, 14, 14, 576),
'Mixed_5c': (4, 7, 7, 1024),
'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_inception_v3(self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=INCEPTION_V3_LAYOUT,
)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'Mixed_5d': (4, 35, 35, 256),
'Mixed_6e': (4, 17, 17, 576),
'Mixed_7c': (4, 8, 8, 1024),
'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(
self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
dtype=tf.float32),
'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
dtype=tf.float32),
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'Conv2d_11_pointwise': (4, 16, 16, 512),
'Conv2d_13_pointwise': (4, 8, 8, 1024),
'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),
'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),
'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_feature_map_shapes_with_pool_residual_ssd_mobilenet_v1(
self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
dtype=tf.float32),
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT,
pool_residual=True
)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'Conv2d_13_pointwise': (4, 8, 8, 1024),
'Conv2d_13_pointwise_2_Conv2d_1_3x3_s2_256': (4, 4, 4, 256),
'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_256': (4, 2, 2, 256),
'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 1, 1, 256)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_variable_names_with_inception_v2(self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=INCEPTION_V2_LAYOUT,
)
def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_slim_variables = set([
'Mixed_5c_1_Conv2d_3_1x1_256/weights',
'Mixed_5c_1_Conv2d_3_1x1_256/biases',
'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights',
'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases',
'Mixed_5c_1_Conv2d_4_1x1_128/weights',
'Mixed_5c_1_Conv2d_4_1x1_128/biases',
'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights',
'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases',
'Mixed_5c_1_Conv2d_5_1x1_128/weights',
'Mixed_5c_1_Conv2d_5_1x1_128/biases',
'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights',
'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases',
])
expected_keras_variables = set([
'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias',
'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias',
'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias',
'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias',
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias',
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
])
if tf_version.is_tf2():
actual_variable_set = set(
[var.name.split(':')[0] for var in feature_map_generator.variables])
self.assertSetEqual(expected_keras_variables, actual_variable_set)
else:
with g.as_default():
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
self.assertSetEqual(expected_slim_variables, actual_variable_set)
def test_get_expected_variable_names_with_inception_v2_use_depthwise(
self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
layout_copy = INCEPTION_V2_LAYOUT.copy()
layout_copy['use_depthwise'] = True
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=layout_copy,
)
def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_slim_variables = set([
'Mixed_5c_1_Conv2d_3_1x1_256/weights',
'Mixed_5c_1_Conv2d_3_1x1_256/biases',
'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/depthwise_weights',
'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/biases',
'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights',
'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases',
'Mixed_5c_1_Conv2d_4_1x1_128/weights',
'Mixed_5c_1_Conv2d_4_1x1_128/biases',
'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/depthwise_weights',
'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/biases',
'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights',
'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases',
'Mixed_5c_1_Conv2d_5_1x1_128/weights',
'Mixed_5c_1_Conv2d_5_1x1_128/biases',
'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/depthwise_weights',
'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/biases',
'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights',
'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases',
])
expected_keras_variables = set([
'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias',
('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/'
'depthwise_kernel'),
('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/'
'bias'),
'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias',
'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias',
('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/'
'depthwise_kernel'),
('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/'
'bias'),
'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias',
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias',
('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/'
'depthwise_kernel'),
('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/'
'bias'),
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
])
if tf_version.is_tf2():
actual_variable_set = set(
[var.name.split(':')[0] for var in feature_map_generator.variables])
self.assertSetEqual(expected_keras_variables, actual_variable_set)
else:
with g.as_default():
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
self.assertSetEqual(expected_slim_variables, actual_variable_set)
@parameterized.parameters({'use_native_resize_op': True},
{'use_native_resize_op': False})
class FPNFeatureMapGeneratorTest(test_case.TestCase, parameterized.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _build_feature_map_generator(
self, image_features, depth, use_bounded_activations=False,
use_native_resize_op=False, use_explicit_padding=False,
use_depthwise=False):
if tf_version.is_tf2():
return feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=len(image_features),
depth=depth,
is_training=True,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
use_depthwise=use_depthwise,
use_explicit_padding=use_explicit_padding,
use_bounded_activations=use_bounded_activations,
use_native_resize_op=use_native_resize_op,
scope=None,
name='FeatureMaps',
)
else:
def feature_map_generator(image_features):
return feature_map_generators.fpn_top_down_feature_maps(
image_features=image_features,
depth=depth,
use_depthwise=use_depthwise,
use_explicit_padding=use_explicit_padding,
use_bounded_activations=use_bounded_activations,
use_native_resize_op=use_native_resize_op)
return feature_map_generator
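  # As above, both branches return a callable that maps a list of
  # (name, feature) tuples to a dict of 'top_down_*' feature maps, each with
  # `depth` channels and the spatial size of its corresponding input level.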
def test_get_expected_feature_map_shapes(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'top_down_block2': (4, 8, 8, 128),
'top_down_block3': (4, 4, 4, 128),
'top_down_block4': (4, 2, 2, 128),
'top_down_block5': (4, 1, 1, 128)
}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_explicit_padding(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_explicit_padding=True,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'top_down_block2': (4, 8, 8, 128),
'top_down_block3': (4, 4, 4, 128),
'top_down_block4': (4, 2, 2, 128),
'top_down_block5': (4, 1, 1, 128)
}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
def test_use_bounded_activations_add_operations(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [('block2',
tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3',
tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4',
tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5',
tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_bounded_activations=True,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_added_operations = dict.fromkeys([
'top_down/clip_by_value', 'top_down/clip_by_value_1',
'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
'top_down/clip_by_value_6'
])
op_names = {op.name: None for op in g.get_operations()}
self.assertDictContainsSubset(expected_added_operations, op_names)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
def test_use_bounded_activations_clip_value(
self, use_native_resize_op):
tf_graph = tf.Graph()
with tf_graph.as_default():
image_features = [
('block2', 255 * tf.ones([4, 8, 8, 256], dtype=tf.float32)),
('block3', 255 * tf.ones([4, 4, 4, 256], dtype=tf.float32)),
('block4', 255 * tf.ones([4, 2, 2, 256], dtype=tf.float32)),
('block5', 255 * tf.ones([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_bounded_activations=True,
use_native_resize_op=use_native_resize_op)
feature_map_generator(image_features)
expected_clip_by_value_ops = [
'top_down/clip_by_value', 'top_down/clip_by_value_1',
'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
'top_down/clip_by_value_6'
]
# Gathers activation tensors before and after clip_by_value operations.
activations = {}
for clip_by_value_op in expected_clip_by_value_ops:
clip_input_tensor = tf_graph.get_operation_by_name(
'{}/Minimum'.format(clip_by_value_op)).inputs[0]
clip_output_tensor = tf_graph.get_tensor_by_name(
'{}:0'.format(clip_by_value_op))
activations.update({
'before_{}'.format(clip_by_value_op): clip_input_tensor,
'after_{}'.format(clip_by_value_op): clip_output_tensor,
})
expected_lower_bound = -feature_map_generators.ACTIVATION_BOUND
expected_upper_bound = feature_map_generators.ACTIVATION_BOUND
init_op = tf.global_variables_initializer()
with self.test_session() as session:
session.run(init_op)
activations_output = session.run(activations)
for clip_by_value_op in expected_clip_by_value_ops:
        # Before clipping, activations are beyond the expected bounds because
        # of the large input image_features values.
activations_before_clipping = (
activations_output['before_{}'.format(clip_by_value_op)])
before_clipping_lower_bound = np.amin(activations_before_clipping)
before_clipping_upper_bound = np.amax(activations_before_clipping)
self.assertLessEqual(before_clipping_lower_bound,
expected_lower_bound)
self.assertGreaterEqual(before_clipping_upper_bound,
expected_upper_bound)
        # After clipping, activations are bounded as expected.
activations_after_clipping = (
activations_output['after_{}'.format(clip_by_value_op)])
after_clipping_lower_bound = np.amin(activations_after_clipping)
after_clipping_upper_bound = np.amax(activations_after_clipping)
self.assertGreaterEqual(after_clipping_lower_bound,
expected_lower_bound)
self.assertLessEqual(after_clipping_upper_bound, expected_upper_bound)
def test_get_expected_feature_map_shapes_with_depthwise(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_depthwise=True,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'top_down_block2': (4, 8, 8, 128),
'top_down_block3': (4, 4, 4, 128),
'top_down_block4': (4, 2, 2, 128),
'top_down_block5': (4, 1, 1, 128)
}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_variable_names(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_slim_variables = set([
'projection_1/weights',
'projection_1/biases',
'projection_2/weights',
'projection_2/biases',
'projection_3/weights',
'projection_3/biases',
'projection_4/weights',
'projection_4/biases',
'smoothing_1/weights',
'smoothing_1/biases',
'smoothing_2/weights',
'smoothing_2/biases',
'smoothing_3/weights',
'smoothing_3/biases',
])
expected_keras_variables = set([
'FeatureMaps/top_down/projection_1/kernel',
'FeatureMaps/top_down/projection_1/bias',
'FeatureMaps/top_down/projection_2/kernel',
'FeatureMaps/top_down/projection_2/bias',
'FeatureMaps/top_down/projection_3/kernel',
'FeatureMaps/top_down/projection_3/bias',
'FeatureMaps/top_down/projection_4/kernel',
'FeatureMaps/top_down/projection_4/bias',
'FeatureMaps/top_down/smoothing_1_conv/kernel',
'FeatureMaps/top_down/smoothing_1_conv/bias',
'FeatureMaps/top_down/smoothing_2_conv/kernel',
'FeatureMaps/top_down/smoothing_2_conv/bias',
'FeatureMaps/top_down/smoothing_3_conv/kernel',
'FeatureMaps/top_down/smoothing_3_conv/bias'
])
if tf_version.is_tf2():
actual_variable_set = set(
[var.name.split(':')[0] for var in feature_map_generator.variables])
self.assertSetEqual(expected_keras_variables, actual_variable_set)
else:
with g.as_default():
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
self.assertSetEqual(expected_slim_variables, actual_variable_set)
def test_get_expected_variable_names_with_depthwise(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_depthwise=True,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_slim_variables = set([
'projection_1/weights',
'projection_1/biases',
'projection_2/weights',
'projection_2/biases',
'projection_3/weights',
'projection_3/biases',
'projection_4/weights',
'projection_4/biases',
'smoothing_1/depthwise_weights',
'smoothing_1/pointwise_weights',
'smoothing_1/biases',
'smoothing_2/depthwise_weights',
'smoothing_2/pointwise_weights',
'smoothing_2/biases',
'smoothing_3/depthwise_weights',
'smoothing_3/pointwise_weights',
'smoothing_3/biases',
])
expected_keras_variables = set([
'FeatureMaps/top_down/projection_1/kernel',
'FeatureMaps/top_down/projection_1/bias',
'FeatureMaps/top_down/projection_2/kernel',
'FeatureMaps/top_down/projection_2/bias',
'FeatureMaps/top_down/projection_3/kernel',
'FeatureMaps/top_down/projection_3/bias',
'FeatureMaps/top_down/projection_4/kernel',
'FeatureMaps/top_down/projection_4/bias',
'FeatureMaps/top_down/smoothing_1_depthwise_conv/depthwise_kernel',
'FeatureMaps/top_down/smoothing_1_depthwise_conv/pointwise_kernel',
'FeatureMaps/top_down/smoothing_1_depthwise_conv/bias',
'FeatureMaps/top_down/smoothing_2_depthwise_conv/depthwise_kernel',
'FeatureMaps/top_down/smoothing_2_depthwise_conv/pointwise_kernel',
'FeatureMaps/top_down/smoothing_2_depthwise_conv/bias',
'FeatureMaps/top_down/smoothing_3_depthwise_conv/depthwise_kernel',
'FeatureMaps/top_down/smoothing_3_depthwise_conv/pointwise_kernel',
'FeatureMaps/top_down/smoothing_3_depthwise_conv/bias'
])
if tf_version.is_tf2():
actual_variable_set = set(
[var.name.split(':')[0] for var in feature_map_generator.variables])
self.assertSetEqual(expected_keras_variables, actual_variable_set)
else:
with g.as_default():
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
self.assertSetEqual(expected_slim_variables, actual_variable_set)
class GetDepthFunctionTest(tf.test.TestCase):
def test_return_min_depth_when_multiplier_is_small(self):
depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5,
min_depth=16)
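    # 16 * 0.5 = 8 falls below min_depth, so the result is clamped up to 16.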
self.assertEqual(depth_fn(16), 16)
def test_return_correct_depth_with_multiplier(self):
depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5,
min_depth=16)
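    # 64 * 0.5 = 32 exceeds min_depth, so the scaled depth is returned as is.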
self.assertEqual(depth_fn(64), 32)
@parameterized.parameters(
{'replace_pool_with_conv': False},
{'replace_pool_with_conv': True},
)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase):
def test_get_expected_feature_map_shapes(self, replace_pool_with_conv):
image_features = {
'image_features': tf.random_uniform([4, 19, 19, 1024])
}
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=1024,
num_layers=6,
image_features=image_features,
replace_pool_with_conv=replace_pool_with_conv)
expected_pool_feature_map_shapes = {
'Base_Conv2d_1x1_1024': (4, 19, 19, 1024),
'MaxPool2d_0_2x2': (4, 10, 10, 1024),
'MaxPool2d_1_2x2': (4, 5, 5, 1024),
'MaxPool2d_2_2x2': (4, 3, 3, 1024),
'MaxPool2d_3_2x2': (4, 2, 2, 1024),
'MaxPool2d_4_2x2': (4, 1, 1, 1024),
}
expected_conv_feature_map_shapes = {
'Base_Conv2d_1x1_1024': (4, 19, 19, 1024),
'Conv2d_0_3x3_s2_1024': (4, 10, 10, 1024),
'Conv2d_1_3x3_s2_1024': (4, 5, 5, 1024),
'Conv2d_2_3x3_s2_1024': (4, 3, 3, 1024),
'Conv2d_3_3x3_s2_1024': (4, 2, 2, 1024),
'Conv2d_4_3x3_s2_1024': (4, 1, 1, 1024),
}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = {key: value.shape
for key, value in out_feature_maps.items()}
if replace_pool_with_conv:
self.assertDictEqual(expected_conv_feature_map_shapes,
out_feature_map_shapes)
else:
self.assertDictEqual(expected_pool_feature_map_shapes,
out_feature_map_shapes)
def test_get_expected_variable_names(self, replace_pool_with_conv):
image_features = {
'image_features': tf.random_uniform([4, 19, 19, 1024])
}
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=1024,
num_layers=6,
image_features=image_features,
replace_pool_with_conv=replace_pool_with_conv)
expected_pool_variables = set([
'Base_Conv2d_1x1_1024/weights',
'Base_Conv2d_1x1_1024/biases',
])
expected_conv_variables = set([
'Base_Conv2d_1x1_1024/weights',
'Base_Conv2d_1x1_1024/biases',
'Conv2d_0_3x3_s2_1024/weights',
'Conv2d_0_3x3_s2_1024/biases',
'Conv2d_1_3x3_s2_1024/weights',
'Conv2d_1_3x3_s2_1024/biases',
'Conv2d_2_3x3_s2_1024/weights',
'Conv2d_2_3x3_s2_1024/biases',
'Conv2d_3_3x3_s2_1024/weights',
'Conv2d_3_3x3_s2_1024/biases',
'Conv2d_4_3x3_s2_1024/weights',
'Conv2d_4_3x3_s2_1024/biases',
])
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
sess.run(feature_maps)
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
if replace_pool_with_conv:
self.assertSetEqual(expected_conv_variables, actual_variable_set)
else:
self.assertSetEqual(expected_pool_variables, actual_variable_set)
if __name__ == '__main__':
tf.test.main()
| 35,575 | 41.201661 | 80 | py |
models | models-master/research/object_detection/models/center_net_mobilenet_v2_feature_extractor_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing mobilenet_v2 feature extractor for CenterNet."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import center_net_mobilenet_v2_feature_extractor
from object_detection.models.keras_models import mobilenet_v2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMobileNetV2FeatureExtractorTest(test_case.TestCase):
def test_center_net_mobilenet_v2_feature_extractor(self):
net = mobilenet_v2.mobilenet_v2(True, include_top=False)
model = center_net_mobilenet_v2_feature_extractor.CenterNetMobileNetV2FeatureExtractor(
net)
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
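    # The extractor emits a stride-4 feature map, so a 224x224 input yields
    # 56x56 spatial dimensions; this variant ends with 64 channels.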
self.assertEqual(outputs.shape, (8, 56, 56, 64))
if __name__ == '__main__':
tf.test.main()
| 1,713 | 35.468085 | 91 | py |
models | models-master/research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception Resnet v2 Faster R-CNN implementation in Keras.
See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261)
as well as
"Speed/accuracy trade-offs for modern convolutional object detectors" by
Huang et al. (https://arxiv.org/abs/1611.10012)
"""
# Skip pylint for this file because it times out
# pylint: skip-file
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models.keras_models import inception_resnet_v2
from object_detection.utils import model_util
from object_detection.utils import variables_helper
class FasterRCNNInceptionResnetV2KerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Faster R-CNN with Inception Resnet v2 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
super(FasterRCNNInceptionResnetV2KerasFeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
weight_decay)
self._variable_dict = {}
self.classification_backbone = None
def preprocess(self, resized_inputs):
"""Faster R-CNN with Inception Resnet v2 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def get_proposal_feature_extractor_model(self, name=None):
"""Returns a model that extracts first stage RPN features.
Extracts features using the first half of the Inception Resnet v2 network.
We construct the network in `align_feature_maps=True` mode, which means
that all VALID paddings in the network are changed to SAME padding so that
the feature maps are aligned.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes preprocessed_inputs:
A [batch, height, width, channels] float32 tensor
representing a batch of images.
And returns rpn_feature_map:
A tensor with shape [batch, height, width, depth]
"""
if not self.classification_backbone:
self.classification_backbone = inception_resnet_v2.inception_resnet_v2(
self._train_batch_norm,
output_stride=self._first_stage_features_stride,
align_feature_maps=True,
weight_decay=self._weight_decay,
weights=None,
include_top=False)
with tf.name_scope(name):
with tf.name_scope('InceptionResnetV2'):
proposal_features = self.classification_backbone.get_layer(
name='block17_20_ac').output
keras_model = tf.keras.Model(
inputs=self.classification_backbone.inputs,
outputs=proposal_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
def get_box_classifier_feature_extractor_model(self, name=None):
"""Returns a model that extracts second stage box classifier features.
This function reconstructs the "second half" of the Inception ResNet v2
network after the part defined in `get_proposal_feature_extractor_model`.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes proposal_feature_maps:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
And returns proposal_classifier_features:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
if not self.classification_backbone:
self.classification_backbone = inception_resnet_v2.inception_resnet_v2(
self._train_batch_norm,
output_stride=self._first_stage_features_stride,
align_feature_maps=True,
weight_decay=self._weight_decay,
weights=None,
include_top=False)
with tf.name_scope(name):
with tf.name_scope('InceptionResnetV2'):
proposal_feature_maps = self.classification_backbone.get_layer(
name='block17_20_ac').output
proposal_classifier_features = self.classification_backbone.get_layer(
name='conv_7b_ac').output
keras_model = model_util.extract_submodel(
model=self.classification_backbone,
inputs=proposal_feature_maps,
outputs=proposal_classifier_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
| 6,316 | 38.48125 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v1_fpn_feature_extractor.
Using the parameterized test decorator, this test covers both the Slim-based
and Keras-based Mobilenet V1 FPN feature extractors in SSD.
"""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_fpn_keras_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SsdMobilenetV1FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True, use_explicit_padding=False,
use_keras=True):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
del use_keras
return (ssd_mobilenet_v1_fpn_keras_feature_extractor.
SSDMobileNetV1FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
use_depthwise=True,
name='MobilenetV1_FPN'))
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
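    # The five FPN levels cover strides 8 through 128, so a 256x256 input
    # yields spatial sizes 32, 16, 8, 4 and 2, each with 256 channels.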
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_384(self):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=True)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=True)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=True)
def test_preprocess_returns_correct_value_range(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=True)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
if __name__ == '__main__':
tf.test.main()
| 7,748 | 42.05 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SSD Mobilenet V1 feature extractors.
Using the parameterized test decorator, this test covers both the Slim-based
and Keras-based Mobilenet V1 feature extractors in SSD.
"""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV1FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
is_training=False,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
is_training: whether the network is in training mode.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
del use_keras
return ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers)
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
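    # The first two maps come from MobileNet V1 endpoints at strides 16 and
    # 32 (128 / 16 = 8, 128 / 32 = 4); the remaining maps are extra SSD
    # layers that shrink the spatial size down to 1x1.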
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=False)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32),
(2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32),
(2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=False)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=False)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV1'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=False)
def test_variable_count(self):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=False)
self.assertEqual(len(variables), 151)
def test_has_fused_batchnorm(self):
image_height = 40
image_width = 40
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(
any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
def test_extract_features_with_fewer_layers(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False, num_layers=4,
use_keras=False)
if __name__ == '__main__':
tf.test.main()
| 9,609 | 34.201465 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV2 features."""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import mobilenet_v2
from object_detection.utils import ops
from object_detection.utils import shape_utils
class SSDMobileNetV2KerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Feature Extractor using MobilenetV2 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False,
name=None):
"""MobileNetV2 Feature Extractor for SSD Models.
Mobilenet v2 (experimental), designed by sandler@. More details can be found
in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor (Functions
as a width multiplier for the mobilenet_v2 network itself).
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
      inplace_batchnorm_update: Whether to update batch norm moving average
        values in place. When this is false, the train op must add a control
        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to
        update batch norm statistics.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
num_layers: Number of SSD layers.
      override_base_feature_extractor_hyperparams: Whether to override
        hyperparameters of the base feature extractor with the ones from
        `conv_hyperparams`.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDMobileNetV2KerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
self._feature_map_layout = {
'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', ''
][:self._num_layers],
'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
'use_depthwise': self._use_depthwise,
'use_explicit_padding': self._use_explicit_padding,
}
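    # The first two maps are taken directly from the MobileNet V2 backbone;
    # the remaining entries create new layers with the listed depths, and a
    # layer_depth of -1 keeps the depth of the source layer.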
self.classification_backbone = None
self.feature_map_generator = None
def build(self, input_shape):
full_mobilenet_v2 = mobilenet_v2.mobilenet_v2(
batchnorm_training=(self._is_training and not self._freeze_batchnorm),
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
weights=None,
use_explicit_padding=self._use_explicit_padding,
alpha=self._depth_multiplier,
min_depth=self._min_depth,
include_top=False)
conv2d_11_pointwise = full_mobilenet_v2.get_layer(
name='block_13_expand_relu').output
conv2d_13_pointwise = full_mobilenet_v2.get_layer(name='out_relu').output
self.classification_backbone = tf.keras.Model(
inputs=full_mobilenet_v2.inputs,
outputs=[conv2d_11_pointwise, conv2d_13_pointwise])
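    # The backbone is truncated to two endpoints: the block 13 expansion
    # output (stride 16) and the final 'out_relu' layer (stride 32), which
    # feed the feature map generator constructed below.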
self.feature_map_generator = (
feature_map_generators.KerasMultiResolutionFeatureMaps(
feature_map_layout=self._feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
self.built = True
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
image_features = self.classification_backbone(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
feature_maps = self.feature_map_generator({
'layer_15/expansion_output': image_features[0],
'layer_19': image_features[1]})
return list(feature_maps.values())
| 6,981 | 40.559524 | 80 | py |
models | models-master/research/object_detection/models/ssd_efficientnet_bifpn_feature_extractor_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ssd_efficientnet_bifpn_feature_extractor."""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import ssd_efficientnet_bifpn_feature_extractor
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
def _count_params(model, trainable_only=True):
"""Returns the count of all model parameters, or just trainable ones."""
if not trainable_only:
return model.count_params()
else:
return int(np.sum([
tf.keras.backend.count_params(p) for p in model.trainable_weights]))
@parameterized.parameters(
{'efficientdet_version': 'efficientdet-d0',
'efficientnet_version': 'efficientnet-b0',
'bifpn_num_iterations': 3,
'bifpn_num_filters': 64,
'bifpn_combine_method': 'fast_attention'},
{'efficientdet_version': 'efficientdet-d1',
'efficientnet_version': 'efficientnet-b1',
'bifpn_num_iterations': 4,
'bifpn_num_filters': 88,
'bifpn_combine_method': 'fast_attention'},
{'efficientdet_version': 'efficientdet-d2',
'efficientnet_version': 'efficientnet-b2',
'bifpn_num_iterations': 5,
'bifpn_num_filters': 112,
'bifpn_combine_method': 'fast_attention'},
{'efficientdet_version': 'efficientdet-d3',
'efficientnet_version': 'efficientnet-b3',
'bifpn_num_iterations': 6,
'bifpn_num_filters': 160,
'bifpn_combine_method': 'fast_attention'},
{'efficientdet_version': 'efficientdet-d4',
'efficientnet_version': 'efficientnet-b4',
'bifpn_num_iterations': 7,
'bifpn_num_filters': 224,
'bifpn_combine_method': 'fast_attention'},
{'efficientdet_version': 'efficientdet-d5',
'efficientnet_version': 'efficientnet-b5',
'bifpn_num_iterations': 7,
'bifpn_num_filters': 288,
'bifpn_combine_method': 'fast_attention'},
# efficientdet-d6 and efficientdet-d7 only differ in input size.
{'efficientdet_version': 'efficientdet-d6-d7',
'efficientnet_version': 'efficientnet-b6',
'bifpn_num_iterations': 8,
'bifpn_num_filters': 384,
'bifpn_combine_method': 'sum'})
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SSDEfficientNetBiFPNFeatureExtractorTest(
test_case.TestCase, parameterized.TestCase):
def _build_conv_hyperparams(self, add_batch_norm=True):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
force_use_bias: true
activation: SWISH
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
"""
if add_batch_norm:
batch_norm_proto = """
batch_norm {
scale: true,
decay: 0.99,
epsilon: 0.001,
}
"""
conv_hyperparams_text_proto += batch_norm_proto
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _create_feature_extractor(self,
efficientnet_version='efficientnet-b0',
bifpn_num_iterations=3,
bifpn_num_filters=64,
bifpn_combine_method='fast_attention'):
"""Constructs a new EfficientNetBiFPN feature extractor."""
depth_multiplier = 1.0
pad_to_multiple = 1
min_depth = 16
return (ssd_efficientnet_bifpn_feature_extractor
.SSDEfficientNetBiFPNKerasFeatureExtractor(
is_training=True,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version=efficientnet_version))
def test_efficientdet_feature_extractor_shapes(self,
efficientdet_version,
efficientnet_version,
bifpn_num_iterations,
bifpn_num_filters,
bifpn_combine_method):
feature_extractor = self._create_feature_extractor(
efficientnet_version=efficientnet_version,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method)
outputs = feature_extractor(np.zeros((2, 256, 256, 3), dtype=np.float32))
self.assertEqual(outputs[0].shape, (2, 32, 32, bifpn_num_filters))
self.assertEqual(outputs[1].shape, (2, 16, 16, bifpn_num_filters))
self.assertEqual(outputs[2].shape, (2, 8, 8, bifpn_num_filters))
self.assertEqual(outputs[3].shape, (2, 4, 4, bifpn_num_filters))
self.assertEqual(outputs[4].shape, (2, 2, 2, bifpn_num_filters))
def test_efficientdet_feature_extractor_params(self,
efficientdet_version,
efficientnet_version,
bifpn_num_iterations,
bifpn_num_filters,
bifpn_combine_method):
feature_extractor = self._create_feature_extractor(
efficientnet_version=efficientnet_version,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method)
_ = feature_extractor(np.zeros((2, 256, 256, 3), dtype=np.float32))
expected_params = {
'efficientdet-d0': 5484829,
'efficientdet-d1': 8185156,
'efficientdet-d2': 9818153,
'efficientdet-d3': 13792706,
'efficientdet-d4': 22691445,
'efficientdet-d5': 35795677,
'efficientdet-d6-d7': 53624512,
}
num_params = _count_params(feature_extractor)
self.assertEqual(expected_params[efficientdet_version], num_params)
if __name__ == '__main__':
tf.test.main()
| 7,390 | 40.061111 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_testbase.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base test class for ssd_mobilenet_edgetpu_feature_extractor."""
import abc
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
class _SsdMobilenetEdgeTPUFeatureExtractorTestBase(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
"""Base class for MobilenetEdgeTPU tests."""
@abc.abstractmethod
def _get_input_sizes(self):
"""Return feature map sizes for the two inputs to SSD head."""
pass
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
input_feature_sizes = self._get_input_sizes()
    expected_feature_map_shape = [(2, 8, 8, input_feature_sizes[0]),
                                  (2, 4, 4, input_feature_sizes[1]),
                                  (2, 2, 2, 512), (2, 1, 1, 256),
                                  (2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_keras=False)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
input_feature_sizes = self._get_input_sizes()
    expected_feature_map_shape = [(2, 19, 19, input_feature_sizes[0]),
                                  (2, 10, 10, input_feature_sizes[1]),
                                  (2, 5, 5, 512), (2, 3, 3, 256),
                                  (2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_keras=False)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
input_feature_sizes = self._get_input_sizes()
    expected_feature_map_shape = [(2, 20, 20, input_feature_sizes[0]),
                                  (2, 10, 10, input_feature_sizes[1]),
                                  (2, 5, 5, 512), (2, 3, 3, 256),
                                  (2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_has_fused_batchnorm(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
| 4,560 | 39.362832 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf2_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SSD Mobilenet V1 feature extractors.
By using the parameterized test decorator, this test serves both the
Slim-based and Keras-based MobileNet V1 feature extractors in SSD.
"""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_keras_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SsdMobilenetV1FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
is_training=False,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
is_training: whether the network is in training mode.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v1_keras_feature_extractor
.SSDMobileNetV1KerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
name='MobilenetV1'))
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=True)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32),
(2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32),
(2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=True)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=True)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=True)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_extract_features_with_fewer_layers(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False, num_layers=4,
use_keras=True)
if __name__ == '__main__':
tf.test.main()
| 8,796 | 34.329317 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_edgetpu_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor
from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor_testbase
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetEdgeTPUFeatureExtractorTest(
ssd_mobilenet_edgetpu_feature_extractor_testbase
._SsdMobilenetEdgeTPUFeatureExtractorTestBase):
def _get_input_sizes(self):
"""Return first two input feature map sizes."""
return [384, 192]
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
use_keras=False):
"""Constructs a new MobileNetEdgeTPU feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return (ssd_mobilenet_edgetpu_feature_extractor
.SSDMobileNetEdgeTPUFeatureExtractor(
False,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
if __name__ == '__main__':
tf.test.main()
| 2,565 | 37.878788 | 84 | py |
models | models-master/research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_resnet_keras_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_res
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FasterRcnnResnetKerasFeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, architecture='resnet_v1_50'):
return frcnn_res.FasterRCNNResnet50KerasFeatureExtractor(
is_training=False,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 14, 14, 1024])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1024])
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(tf.errors.InvalidArgumentError):
feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor()
proposal_feature_maps = tf.random_uniform(
[3, 7, 7, 1024], maxval=255, dtype=tf.float32)
model = feature_extractor.get_box_classifier_feature_extractor_model(
name='TestScope')
proposal_classifier_features = (
model(proposal_feature_maps))
features_shape = tf.shape(proposal_classifier_features)
# Note: due to a slight mismatch in slim and keras resnet definitions
# the output shape of the box classifier is slightly different compared to
# that of the slim implementation. The keras version is more `canonical`
# in that it more accurately reflects the original authors' implementation.
# TODO(jonathanhuang): make the output shape match that of the slim
# implementation by using atrous convolutions.
self.assertAllEqual(features_shape.numpy(), [3, 4, 4, 2048])
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 3,698 | 44.666667 | 91 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_keras_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for Keras MobilenetV1 features."""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import mobilenet_v1
from object_detection.utils import ops
from object_detection.utils import shape_utils
class SSDMobileNetV1KerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Feature Extractor using Keras MobilenetV1 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False,
name=None):
"""Keras MobileNetV1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_depthwise: Whether to use depthwise convolutions. Default is False.
num_layers: Number of SSD layers.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDMobileNetV1KerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
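    # Feature map layout note: entries with an explicit 'from_layer' reuse
    # backbone endpoints (layer_depth -1 keeps that endpoint's depth), while
    # the empty entries ask KerasMultiResolutionFeatureMaps to append extra
    # feature maps of the listed depths (512, 256, 256, 128) on top.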
self._feature_map_layout = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
'', ''][:self._num_layers],
'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
self.classification_backbone = None
self._feature_map_generator = None
def build(self, input_shape):
full_mobilenet_v1 = mobilenet_v1.mobilenet_v1(
batchnorm_training=(self._is_training and not self._freeze_batchnorm),
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
weights=None,
use_explicit_padding=self._use_explicit_padding,
alpha=self._depth_multiplier,
min_depth=self._min_depth,
include_top=False)
conv2d_11_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_11_relu').output
conv2d_13_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_13_relu').output
self.classification_backbone = tf.keras.Model(
inputs=full_mobilenet_v1.inputs,
outputs=[conv2d_11_pointwise, conv2d_13_pointwise])
self._feature_map_generator = (
feature_map_generators.KerasMultiResolutionFeatureMaps(
feature_map_layout=self._feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
self.built = True
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
image_features = self.classification_backbone(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
feature_maps = self._feature_map_generator({
'Conv2d_11_pointwise': image_features[0],
'Conv2d_13_pointwise': image_features[1]})
return list(feature_maps.values())
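# A minimal usage sketch, assuming `hyperparams` is a
# hyperparams_builder.KerasLayerHyperparams built from a Hyperparams proto,
# and that the SSDKerasFeatureExtractor base class dispatches __call__ to
# _extract_features, as in the TF Object Detection API.
def _example_extract_features(hyperparams):
  extractor = SSDMobileNetV1KerasFeatureExtractor(
      is_training=False,
      depth_multiplier=1.0,
      min_depth=32,
      pad_to_multiple=1,
      conv_hyperparams=hyperparams,
      freeze_batchnorm=False,
      inplace_batchnorm_update=False)
  images = tf.zeros([2, 128, 128, 3], dtype=tf.float32)
  feature_maps = extractor(extractor.preprocess(images))
  # For a 128x128 input the six maps have shapes (2, 8, 8, 512),
  # (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256) and
  # (2, 1, 1, 128), matching the accompanying feature extractor tests.
  return feature_maps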
| 6,843 | 40.478788 | 80 | py |
models | models-master/research/object_detection/models/bidirectional_feature_pyramid_generators.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to generate bidirectional feature pyramids based on image features.
Provides bidirectional feature pyramid network (BiFPN) generators that can be
used to build object detection feature extractors, as proposed by Tan et al.
See https://arxiv.org/abs/1911.09070 for more details.
"""
import collections
import functools
from six.moves import range
from six.moves import zip
import tensorflow as tf
from object_detection.utils import bifpn_utils
def _create_bifpn_input_config(fpn_min_level,
fpn_max_level,
input_max_level,
level_scales=None):
"""Creates a BiFPN input config for the input levels from a backbone network.
Args:
fpn_min_level: the minimum pyramid level (highest feature map resolution) to
use in the BiFPN.
fpn_max_level: the maximum pyramid level (lowest feature map resolution) to
use in the BiFPN.
input_max_level: the maximum pyramid level that will be provided as input to
the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels
from input_max_level, up to the desired fpn_max_level.
level_scales: a list of pyramid level scale factors. If 'None', each level's
scale is set to 2^level by default, which corresponds to each successive
feature map scaling by a factor of 2.
Returns:
A list of dictionaries for each feature map expected as input to the BiFPN,
where each has entries for the feature map 'name' and 'scale'.
"""
if not level_scales:
level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)]
bifpn_input_params = []
for i in range(fpn_min_level, min(fpn_max_level, input_max_level) + 1):
bifpn_input_params.append({
'name': '0_up_lvl_{}'.format(i),
'scale': level_scales[i - fpn_min_level]
})
return bifpn_input_params
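# For example, _create_bifpn_input_config(fpn_min_level=3, fpn_max_level=7,
# input_max_level=5) describes the three levels actually provided by the
# backbone:
#   [{'name': '0_up_lvl_3', 'scale': 8},
#    {'name': '0_up_lvl_4', 'scale': 16},
#    {'name': '0_up_lvl_5', 'scale': 32}]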
def _get_bifpn_output_node_names(fpn_min_level, fpn_max_level, node_config):
"""Returns a list of BiFPN output node names, given a BiFPN node config.
Args:
fpn_min_level: the minimum pyramid level (highest feature map resolution)
used by the BiFPN.
fpn_max_level: the maximum pyramid level (lowest feature map resolution)
used by the BiFPN.
node_config: the BiFPN node_config, a list of dictionaries corresponding to
each node in the BiFPN computation graph, where each entry should have an
associated 'name'.
Returns:
A list of strings corresponding to the names of the output BiFPN nodes.
"""
num_output_nodes = fpn_max_level - fpn_min_level + 1
return [node['name'] for node in node_config[-num_output_nodes:]]
def _create_bifpn_node_config(bifpn_num_iterations,
bifpn_num_filters,
fpn_min_level,
fpn_max_level,
input_max_level,
bifpn_node_params=None,
level_scales=None,
use_native_resize_op=False):
"""Creates a config specifying a bidirectional feature pyramid network.
Args:
bifpn_num_iterations: the number of top-down bottom-up feature computations
to repeat in the BiFPN.
bifpn_num_filters: the number of filters (channels) for every feature map
used in the BiFPN.
fpn_min_level: the minimum pyramid level (highest feature map resolution) to
use in the BiFPN.
fpn_max_level: the maximum pyramid level (lowest feature map resolution) to
use in the BiFPN.
input_max_level: the maximum pyramid level that will be provided as input to
the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels
from input_max_level, up to the desired fpn_max_level.
bifpn_node_params: If not 'None', a dictionary of additional default BiFPN
node parameters that will be applied to all BiFPN nodes.
level_scales: a list of pyramid level scale factors. If 'None', each level's
scale is set to 2^level by default, which corresponds to each successive
feature map scaling by a factor of 2.
use_native_resize_op: If true, will use
      tf.compat.v1.image.resize_nearest_neighbor for upsampling.
Returns:
A list of dictionaries used to define nodes in the BiFPN computation graph,
as proposed by EfficientDet, Tan et al (https://arxiv.org/abs/1911.09070).
Each node's entry has the corresponding keys:
name: String. The name of this node in the BiFPN. The node name follows
the format '{bifpn_iteration}_{dn|up}_lvl_{pyramid_level}', where 'dn'
or 'up' refers to whether the node is in the top-down or bottom-up
portion of a single BiFPN iteration.
scale: the scale factor for this node, by default 2^level.
inputs: A list of names of nodes which are inputs to this node.
num_channels: The number of channels for this node.
combine_method: String. Name of the method used to combine input
node feature maps, 'fast_attention' by default for nodes which have more
than one input. Otherwise, 'None' for nodes with only one input node.
input_op: A (partial) function which is called to construct the layers
that will be applied to this BiFPN node's inputs. This function is
called with the arguments:
input_op(name, input_scale, input_num_channels, output_scale,
output_num_channels, conv_hyperparams, is_training,
freeze_batchnorm)
post_combine_op: A (partial) function which is called to construct the
layers that will be applied to the result of the combine operation for
this BiFPN node. This function will be called with the arguments:
post_combine_op(name, conv_hyperparams, is_training, freeze_batchnorm)
If 'None', then no layers will be applied after the combine operation
for this node.
"""
if not level_scales:
level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)]
default_node_params = {
'num_channels':
bifpn_num_filters,
'combine_method':
'fast_attention',
'input_op':
functools.partial(
_create_bifpn_resample_block,
downsample_method='max_pooling',
use_native_resize_op=use_native_resize_op),
'post_combine_op':
functools.partial(
bifpn_utils.create_conv_block,
num_filters=bifpn_num_filters,
kernel_size=3,
strides=1,
padding='SAME',
use_separable=True,
apply_batchnorm=True,
apply_activation=True,
conv_bn_act_pattern=False),
}
if bifpn_node_params:
default_node_params.update(bifpn_node_params)
bifpn_node_params = []
# Create additional base pyramid levels not provided as input to the BiFPN.
# Note, combine_method and post_combine_op are set to None for additional
# base pyramid levels because they do not combine multiple input BiFPN nodes.
for i in range(input_max_level + 1, fpn_max_level + 1):
node_params = dict(default_node_params)
node_params.update({
'name': '0_up_lvl_{}'.format(i),
'scale': level_scales[i - fpn_min_level],
'inputs': ['0_up_lvl_{}'.format(i - 1)],
'combine_method': None,
'post_combine_op': None,
})
bifpn_node_params.append(node_params)
for i in range(bifpn_num_iterations):
# The first bottom-up feature pyramid (which includes the input pyramid
# levels from the backbone network and the additional base pyramid levels)
# is indexed at 0. So, the first top-down bottom-up pass of the BiFPN is
# indexed from 1, and repeated for bifpn_num_iterations iterations.
bifpn_i = i + 1
# Create top-down nodes.
for level_i in reversed(range(fpn_min_level, fpn_max_level)):
inputs = []
# BiFPN nodes in the top-down pass receive input from the corresponding
# level from the previous BiFPN iteration's bottom-up pass, except for the
# bottom-most (min) level node, which is computed once in the initial
# bottom-up pass, and is afterwards only computed in each top-down pass.
if level_i > fpn_min_level or bifpn_i == 1:
inputs.append('{}_up_lvl_{}'.format(bifpn_i - 1, level_i))
else:
inputs.append('{}_dn_lvl_{}'.format(bifpn_i - 1, level_i))
inputs.append(bifpn_node_params[-1]['name'])
node_params = dict(default_node_params)
node_params.update({
'name': '{}_dn_lvl_{}'.format(bifpn_i, level_i),
'scale': level_scales[level_i - fpn_min_level],
'inputs': inputs
})
bifpn_node_params.append(node_params)
# Create bottom-up nodes.
for level_i in range(fpn_min_level + 1, fpn_max_level + 1):
# BiFPN nodes in the bottom-up pass receive input from the corresponding
# level from the preceding top-down pass, except for the top (max) level
# which does not have a corresponding node in the top-down pass.
inputs = ['{}_up_lvl_{}'.format(bifpn_i - 1, level_i)]
if level_i < fpn_max_level:
inputs.append('{}_dn_lvl_{}'.format(bifpn_i, level_i))
inputs.append(bifpn_node_params[-1]['name'])
node_params = dict(default_node_params)
node_params.update({
'name': '{}_up_lvl_{}'.format(bifpn_i, level_i),
'scale': level_scales[level_i - fpn_min_level],
'inputs': inputs
})
bifpn_node_params.append(node_params)
return bifpn_node_params
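# As an illustration of the naming scheme, _create_bifpn_node_config(
# bifpn_num_iterations=1, bifpn_num_filters=64, fpn_min_level=3,
# fpn_max_level=5, input_max_level=4) produces the nodes
#   ['0_up_lvl_5',                # extra base level computed from '0_up_lvl_4'
#    '1_dn_lvl_4', '1_dn_lvl_3',  # top-down pass
#    '1_up_lvl_4', '1_up_lvl_5']  # bottom-up pass
# and the BiFPN output nodes are the last three:
# ['1_dn_lvl_3', '1_up_lvl_4', '1_up_lvl_5'].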
def _create_bifpn_resample_block(name,
input_scale,
input_num_channels,
output_scale,
output_num_channels,
conv_hyperparams,
is_training,
freeze_batchnorm,
downsample_method=None,
use_native_resize_op=False,
maybe_apply_1x1_conv=True,
apply_1x1_pre_sampling=True,
apply_1x1_post_sampling=False):
"""Creates resample block layers for input feature maps to BiFPN nodes.
Args:
name: String. Name used for this block of layers.
input_scale: Scale factor of the input feature map.
input_num_channels: Number of channels in the input feature map.
output_scale: Scale factor of the output feature map.
output_num_channels: Number of channels in the output feature map.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Indicates whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
downsample_method: String. Method to use when downsampling feature maps.
    use_native_resize_op: Bool. Whether to use the native resize op when
upsampling feature maps.
maybe_apply_1x1_conv: Bool. If 'True', a 1x1 convolution will only be
applied if the input_num_channels differs from the output_num_channels.
apply_1x1_pre_sampling: Bool. Whether a 1x1 convolution will be applied to
the input feature map before the up/down-sampling operation.
apply_1x1_post_sampling: Bool. Whether a 1x1 convolution will be applied to
the input feature map after the up/down-sampling operation.
Returns:
A list of layers which may be applied to the input feature maps in order to
compute feature maps with the specified scale and number of channels.
"""
# By default, 1x1 convolutions are only applied before sampling when the
# number of input and output channels differ.
if maybe_apply_1x1_conv and output_num_channels == input_num_channels:
apply_1x1_pre_sampling = False
apply_1x1_post_sampling = False
apply_bn_for_resampling = True
layers = []
if apply_1x1_pre_sampling:
layers.extend(
bifpn_utils.create_conv_block(
name=name + '1x1_pre_sample/',
num_filters=output_num_channels,
kernel_size=1,
strides=1,
padding='SAME',
use_separable=False,
apply_batchnorm=apply_bn_for_resampling,
apply_activation=False,
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm))
layers.extend(
bifpn_utils.create_resample_feature_map_ops(input_scale, output_scale,
downsample_method,
use_native_resize_op,
conv_hyperparams, is_training,
freeze_batchnorm, name))
if apply_1x1_post_sampling:
layers.extend(
bifpn_utils.create_conv_block(
name=name + '1x1_post_sample/',
num_filters=output_num_channels,
kernel_size=1,
strides=1,
padding='SAME',
use_separable=False,
apply_batchnorm=apply_bn_for_resampling,
apply_activation=False,
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm))
return layers
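# For instance, resampling a 40-channel level-3 input (scale 8) into a
# 64-channel level-4 node (scale 16) applies a 1x1 convolution to 64 channels
# before sampling (since the channel counts differ) and then downsamples by a
# factor of 2 using the 'max_pooling' method configured by
# _create_bifpn_node_config above.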
def _create_bifpn_combine_op(num_inputs, name, combine_method):
"""Creates a BiFPN output config, a list of the output BiFPN node names.
Args:
num_inputs: The number of inputs to this combine operation.
name: String. The name of this combine operation.
combine_method: String. The method used to combine input feature maps.
Returns:
A function which may be called with a list of num_inputs feature maps
and which will return a single feature map.
"""
combine_op = None
if num_inputs < 1:
raise ValueError('Expected at least 1 input for BiFPN combine.')
elif num_inputs == 1:
combine_op = lambda x: x[0]
else:
combine_op = bifpn_utils.BiFPNCombineLayer(
combine_method=combine_method, name=name)
return combine_op
class KerasBiFpnFeatureMaps(tf.keras.Model):
"""Generates Keras based BiFPN feature maps from an input feature map pyramid.
A Keras model that generates multi-scale feature maps for detection by
iteratively computing top-down and bottom-up feature pyramids, as in the
EfficientDet paper by Tan et al, see arxiv.org/abs/1911.09070 for details.
"""
def __init__(self,
bifpn_num_iterations,
bifpn_num_filters,
fpn_min_level,
fpn_max_level,
input_max_level,
is_training,
conv_hyperparams,
freeze_batchnorm,
bifpn_node_params=None,
use_native_resize_op=False,
name=None):
"""Constructor.
Args:
bifpn_num_iterations: The number of top-down bottom-up iterations.
bifpn_num_filters: The number of filters (channels) to be used for all
feature maps in this BiFPN.
fpn_min_level: The minimum pyramid level (highest feature map resolution)
to use in the BiFPN.
fpn_max_level: The maximum pyramid level (lowest feature map resolution)
to use in the BiFPN.
input_max_level: The maximum pyramid level that will be provided as input
to the BiFPN. Accordingly, the BiFPN will compute any additional pyramid
levels from input_max_level up to the desired fpn_max_level, with each
        successive level downsampling by a scale factor of 2 by default.
is_training: Indicates whether the feature generator is in training mode.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
bifpn_node_params: An optional dictionary that may be used to specify
default parameters for BiFPN nodes, without the need to provide a custom
bifpn_node_config. For example, if '{ combine_method: 'sum' }', then all
BiFPN nodes will combine input feature maps by summation, rather than
by the default fast attention method.
use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for upsampling.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(KerasBiFpnFeatureMaps, self).__init__(name=name)
bifpn_node_config = _create_bifpn_node_config(
bifpn_num_iterations,
bifpn_num_filters,
fpn_min_level,
fpn_max_level,
input_max_level,
bifpn_node_params,
use_native_resize_op=use_native_resize_op)
bifpn_input_config = _create_bifpn_input_config(fpn_min_level,
fpn_max_level,
input_max_level)
bifpn_output_node_names = _get_bifpn_output_node_names(
fpn_min_level, fpn_max_level, bifpn_node_config)
self.bifpn_node_config = bifpn_node_config
self.bifpn_output_node_names = bifpn_output_node_names
self.node_input_blocks = []
self.node_combine_op = []
self.node_post_combine_block = []
all_node_params = bifpn_input_config
all_node_names = [node['name'] for node in all_node_params]
for node_config in bifpn_node_config:
# Maybe transform and/or resample input feature maps.
input_blocks = []
for input_name in node_config['inputs']:
if input_name not in all_node_names:
raise ValueError(
'Input feature map ({}) does not exist:'.format(input_name))
input_index = all_node_names.index(input_name)
input_params = all_node_params[input_index]
input_block = node_config['input_op'](
name='{}/input_{}/'.format(node_config['name'], input_name),
input_scale=input_params['scale'],
input_num_channels=input_params.get('num_channels', None),
output_scale=node_config['scale'],
output_num_channels=node_config['num_channels'],
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm)
input_blocks.append((input_index, input_block))
# Combine input feature maps.
combine_op = _create_bifpn_combine_op(
num_inputs=len(input_blocks),
name=(node_config['name'] + '/combine'),
combine_method=node_config['combine_method'])
# Post-combine layers.
post_combine_block = []
if node_config['post_combine_op']:
post_combine_block.extend(node_config['post_combine_op'](
name=node_config['name'] + '/post_combine/',
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm))
self.node_input_blocks.append(input_blocks)
self.node_combine_op.append(combine_op)
self.node_post_combine_block.append(post_combine_block)
all_node_params.append(node_config)
all_node_names.append(node_config['name'])
def call(self, feature_pyramid):
"""Compute BiFPN feature maps from input feature pyramid.
Executed when calling the `.__call__` method on input.
Args:
feature_pyramid: list of tuples of (tensor_name, image_feature_tensor).
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
feature_maps = [el[1] for el in feature_pyramid]
output_feature_maps = [None for node in self.bifpn_output_node_names]
for index, node in enumerate(self.bifpn_node_config):
node_scope = 'node_{:02d}'.format(index)
with tf.name_scope(node_scope):
# Apply layer blocks to this node's input feature maps.
input_block_results = []
for input_index, input_block in self.node_input_blocks[index]:
block_result = feature_maps[input_index]
for layer in input_block:
block_result = layer(block_result)
input_block_results.append(block_result)
# Combine the resulting feature maps.
node_result = self.node_combine_op[index](input_block_results)
# Apply post-combine layer block if applicable.
for layer in self.node_post_combine_block[index]:
node_result = layer(node_result)
feature_maps.append(node_result)
if node['name'] in self.bifpn_output_node_names:
index = self.bifpn_output_node_names.index(node['name'])
output_feature_maps[index] = node_result
return collections.OrderedDict(
zip(self.bifpn_output_node_names, output_feature_maps))
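# A minimal usage sketch, assuming `hyperparams` is a
# hyperparams_builder.KerasLayerHyperparams instance built elsewhere. The
# input pyramid supplies backbone levels 3-5 (the example channel counts are
# arbitrary); the BiFPN derives levels 6-7 itself.
def _example_bifpn_feature_maps(hyperparams):
  bifpn = KerasBiFpnFeatureMaps(
      bifpn_num_iterations=3,
      bifpn_num_filters=64,
      fpn_min_level=3,
      fpn_max_level=7,
      input_max_level=5,
      is_training=False,
      conv_hyperparams=hyperparams,
      freeze_batchnorm=False)
  feature_pyramid = [
      ('0_up_lvl_3', tf.zeros([2, 32, 32, 40])),
      ('0_up_lvl_4', tf.zeros([2, 16, 16, 112])),
      ('0_up_lvl_5', tf.zeros([2, 8, 8, 320])),
  ]
  # Returns an OrderedDict keyed by the output node names ('3_dn_lvl_3',
  # '3_up_lvl_4', ..., '3_up_lvl_7'), each mapping to a 64-channel feature
  # map whose resolution halves from 32x32 (level 3) down to 2x2 (level 7).
  return bifpn(feature_pyramid)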
| 22,101 | 43.115768 | 80 | py |
models | models-master/research/object_detection/models/center_net_mobilenet_v2_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet V2[1] feature extractor for CenterNet[2] meta architecture.
[1]: https://arxiv.org/abs/1801.04381
[2]: https://arxiv.org/abs/1904.07850
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.models.keras_models import mobilenet_v2 as mobilenetv2
class CenterNetMobileNetV2FeatureExtractor(
center_net_meta_arch.CenterNetFeatureExtractor):
"""The MobileNet V2 feature extractor for CenterNet."""
def __init__(self,
mobilenet_v2_net,
channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.),
bgr_ordering=False):
"""Intializes the feature extractor.
Args:
mobilenet_v2_net: The underlying mobilenet_v2 network to use.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
bgr_ordering: bool, if set will change the channel ordering to be in the
      [blue, green, red] order.
"""
super(CenterNetMobileNetV2FeatureExtractor, self).__init__(
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
self._network = mobilenet_v2_net
output = self._network(self._network.input)
# MobileNet by itself transforms a 224x224x3 volume into a 7x7x1280, which
# leads to a stride of 32. We perform upsampling to get it to a target
# stride of 4.
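    # Each of the three stride-2 transpose convolutions below doubles the
    # spatial resolution, so the effective stride drops from 32 to
    # 32 / 2**3 = 4.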
for num_filters in [256, 128, 64]:
# 1. We use a simple convolution instead of a deformable convolution
conv = tf.keras.layers.Conv2D(
filters=num_filters, kernel_size=1, strides=1, padding='same')
output = conv(output)
output = tf.keras.layers.BatchNormalization()(output)
output = tf.keras.layers.ReLU()(output)
# 2. We use the default initialization for the convolution layers
# instead of initializing it to do bilinear upsampling.
conv_transpose = tf.keras.layers.Conv2DTranspose(
filters=num_filters, kernel_size=3, strides=2, padding='same')
output = conv_transpose(output)
output = tf.keras.layers.BatchNormalization()(output)
output = tf.keras.layers.ReLU()(output)
self._network = tf.keras.models.Model(
inputs=self._network.input, outputs=output)
def preprocess(self, resized_inputs):
resized_inputs = super(CenterNetMobileNetV2FeatureExtractor,
self).preprocess(resized_inputs)
return tf.keras.applications.mobilenet_v2.preprocess_input(resized_inputs)
def load_feature_extractor_weights(self, path):
self._network.load_weights(path)
def call(self, inputs):
return [self._network(inputs)]
@property
def out_stride(self):
"""The stride in the output image of the network."""
return 4
@property
def num_feature_outputs(self):
"""The number of feature outputs returned by the feature extractor."""
return 1
@property
def classification_backbone(self):
return self._network
def mobilenet_v2(channel_means, channel_stds, bgr_ordering,
depth_multiplier=1.0, **kwargs):
"""The MobileNetV2 backbone for CenterNet."""
del kwargs
# We set 'is_training' to True for now.
network = mobilenetv2.mobilenet_v2(
batchnorm_training=True,
alpha=depth_multiplier,
include_top=False,
weights='imagenet' if depth_multiplier == 1.0 else None)
return CenterNetMobileNetV2FeatureExtractor(
network,
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
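# A minimal usage sketch; with the default depth_multiplier=1.0 the factory
# above loads ImageNet weights for the MobileNetV2 backbone.
def _example_centernet_backbone():
  extractor = mobilenet_v2(
      channel_means=(0., 0., 0.),
      channel_stds=(1., 1., 1.),
      bgr_ordering=False)
  images = tf.zeros([1, 224, 224, 3], dtype=tf.float32)
  features = extractor(extractor.preprocess(images))
  # `features` is a single-element list; for a 224x224 input, features[0]
  # has stride 4, i.e. shape [1, 56, 56, 64].
  return features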
| 4,444 | 36.041667 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_keras_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SsdMobilenetV2FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
use_keras: unused argument.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v2_keras_feature_extractor.
SSDMobileNetV2KerasFeatureExtractor(
is_training=False,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
name='MobilenetV2'))
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=True)
def test_extract_features_returns_correct_shapes_128_explicit_padding(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True, use_keras=True)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=True)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=True)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32),
(2, 5, 5, 32), (2, 3, 3, 32),
(2, 2, 2, 32), (2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=True)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=True)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=True)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV2'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=True)
def test_variable_count(self):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=True)
self.assertEqual(len(variables), 292)
def test_extract_features_with_fewer_layers(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False, num_layers=4,
use_keras=True)
if __name__ == '__main__':
tf.test.main()
| 7,894 | 39.906736 | 80 | py |
models | models-master/research/object_detection/models/center_net_hourglass_feature_extractor_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing hourglass feature extractor for CenterNet."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import center_net_hourglass_feature_extractor as hourglass
from object_detection.models.keras_models import hourglass_network
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetHourglassFeatureExtractorTest(test_case.TestCase):
def test_center_net_hourglass_feature_extractor(self):
net = hourglass_network.HourglassNetwork(
num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6],
input_channel_dims=4, channel_dims_per_stage=[6, 8, 10, 12, 14],
num_hourglasses=2)
model = hourglass.CenterNetHourglassFeatureExtractor(net)
def graph_fn():
return model(tf.zeros((2, 64, 64, 3), dtype=np.float32))
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs[0].shape, (2, 16, 16, 6))
self.assertEqual(outputs[1].shape, (2, 16, 16, 6))
if __name__ == '__main__':
tf.test.main()
| 1,801 | 38.173913 | 87 | py |
models | models-master/research/object_detection/models/ssd_feature_extractor_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base test class SSDFeatureExtractors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import test_utils
class SsdFeatureExtractorTestBase(test_case.TestCase):
def _build_conv_hyperparams(self, add_batch_norm=True):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
if add_batch_norm:
batch_norm_proto = """
batch_norm {
scale: false
}
"""
conv_hyperparams_text_proto += batch_norm_proto
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def conv_hyperparams_fn(self):
with slim.arg_scope([]) as sc:
return sc
@abstractmethod
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
use_depthwise: Whether to use depthwise convolutions.
Returns:
an ssd_meta_arch.SSDFeatureExtractor or an
ssd_meta_arch.SSDKerasFeatureExtractor object.
"""
pass
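  # Illustrative sketch only (not part of the original base class): a concrete
  # test subclass typically overrides _create_feature_extractor by forwarding
  # the test parameters to its extractor's constructor, roughly as follows.
  # `MySSDFeatureExtractor` is a hypothetical class name used for illustration.
  #
  #   def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
  #                                 use_explicit_padding=False, num_layers=6,
  #                                 use_keras=False, use_depthwise=False):
  #     min_depth = 32
  #     return MySSDFeatureExtractor(
  #         is_training=False, depth_multiplier=depth_multiplier,
  #         min_depth=min_depth, pad_to_multiple=pad_to_multiple,
  #         conv_hyperparams_fn=self.conv_hyperparams_fn,
  #         use_explicit_padding=use_explicit_padding)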
def _create_features(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
use_keras=False):
kwargs = {}
if use_explicit_padding:
kwargs.update({'use_explicit_padding': use_explicit_padding})
if use_depthwise:
kwargs.update({'use_depthwise': use_depthwise})
if num_layers != 6:
kwargs.update({'num_layers': num_layers})
if use_keras:
kwargs.update({'use_keras': use_keras})
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
**kwargs)
return feature_extractor
def _extract_features(self,
image_tensor,
feature_extractor,
use_keras=False):
if use_keras:
feature_maps = feature_extractor(image_tensor)
else:
feature_maps = feature_extractor.extract_features(image_tensor)
return feature_maps
def check_extract_features_returns_correct_shape(self,
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shapes,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False,
num_channels=3):
with test_utils.GraphContextOrNone() as g:
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn(image_tensor):
return self._extract_features(
image_tensor,
feature_extractor,
use_keras=use_keras)
image_tensor = np.random.rand(batch_size, image_height, image_width,
num_channels).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor], graph=g)
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_extract_features_returns_correct_shapes_with_dynamic_inputs(
self,
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shapes,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False):
with test_utils.GraphContextOrNone() as g:
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn(image_height, image_width):
image_tensor = tf.random_uniform([batch_size, image_height, image_width,
3], dtype=tf.float32)
return self._extract_features(
image_tensor,
feature_extractor,
use_keras=use_keras)
feature_maps = self.execute_cpu(graph_fn, [
np.array(image_height, dtype=np.int32),
np.array(image_width, dtype=np.int32)
], graph=g)
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_extract_features_raises_error_with_invalid_image_size(
self,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=False,
use_depthwise=False):
with test_utils.GraphContextOrNone() as g:
batch = 4
width = tf.random.uniform([], minval=image_width, maxval=image_width+1,
dtype=tf.int32)
height = tf.random.uniform([], minval=image_height, maxval=image_height+1,
dtype=tf.int32)
shape = tf.stack([batch, height, width, 3])
preprocessed_inputs = tf.random.uniform(shape)
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn():
feature_maps = self._extract_features(
preprocessed_inputs,
feature_extractor,
use_keras=use_keras)
return feature_maps
if self.is_tf2():
with self.assertRaises(ValueError):
self.execute_cpu(graph_fn, [], graph=g)
else:
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute_cpu(graph_fn, [], graph=g)
def check_feature_extractor_variables_under_scope(self,
depth_multiplier,
pad_to_multiple,
scope_name,
use_keras=False,
use_depthwise=False):
variables = self.get_feature_extractor_variables(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
for variable in variables:
self.assertTrue(variable.name.startswith(scope_name))
def get_feature_extractor_variables(self,
depth_multiplier,
pad_to_multiple,
use_keras=False,
use_depthwise=False):
g = tf.Graph()
with g.as_default():
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
self._extract_features(
preprocessed_inputs,
feature_extractor,
use_keras=use_keras)
return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
| 9,745 | 35.916667 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_fpn_feature_extractor.
By using the parameterized test decorator, this test serves both the
Slim-based and Keras-based Mobilenet V2 FPN feature extractors in SSD.
"""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_fpn_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
@parameterized.parameters(
{
'use_depthwise': False
},
{
'use_depthwise': True
},
)
class SsdMobilenetV2FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
is_training=True,
use_explicit_padding=False,
use_keras=False,
use_depthwise=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
use_depthwise: Whether to use depthwise convolutions.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v2_fpn_feature_extractor
.SSDMobileNetV2FpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_depthwise=use_depthwise,
use_explicit_padding=use_explicit_padding))
def test_extract_features_returns_correct_shapes_256(self, use_depthwise):
use_keras = False
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_384(self, use_depthwise):
use_keras = False
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_4_channels(self,
use_depthwise):
use_keras = False
image_height = 320
image_width = 320
num_channels = 4
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise,
num_channels=num_channels)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise,
num_channels=num_channels)
def test_extract_features_with_dynamic_image_shape(self,
use_depthwise):
use_keras = False
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self, use_depthwise):
use_keras = False
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self, use_depthwise):
use_keras = False
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_raises_error_with_invalid_image_size(
self, use_depthwise):
use_keras = False
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_preprocess_returns_correct_value_range(self,
use_depthwise):
use_keras = False
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self, use_depthwise):
use_keras = False
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV2'
self.check_feature_extractor_variables_under_scope(
depth_multiplier,
pad_to_multiple,
scope_name,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_fused_batchnorm(self, use_depthwise):
use_keras = False
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(
any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
def test_variable_count(self, use_depthwise):
use_keras = False
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
expected_variables_len = 274
if use_depthwise:
expected_variables_len = 278
self.assertEqual(len(variables), expected_variables_len)
def test_get_expected_feature_map_variable_names(self,
use_depthwise):
use_keras = False
depth_multiplier = 1.0
pad_to_multiple = 1
slim_expected_feature_maps_variables = set([
# Slim Mobilenet V2 feature maps
'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights',
'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights',
'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights',
'MobilenetV2/Conv_1/weights',
# FPN layers
'MobilenetV2/fpn/bottom_up_Conv2d_20/weights',
'MobilenetV2/fpn/bottom_up_Conv2d_21/weights',
'MobilenetV2/fpn/smoothing_1/weights',
'MobilenetV2/fpn/smoothing_2/weights',
'MobilenetV2/fpn/projection_1/weights',
'MobilenetV2/fpn/projection_2/weights',
'MobilenetV2/fpn/projection_3/weights',
])
slim_expected_feature_maps_variables_with_depthwise = set([
# Slim Mobilenet V2 feature maps
'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights',
'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights',
'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights',
'MobilenetV2/Conv_1/weights',
# FPN layers
'MobilenetV2/fpn/bottom_up_Conv2d_20/pointwise_weights',
'MobilenetV2/fpn/bottom_up_Conv2d_20/depthwise_weights',
'MobilenetV2/fpn/bottom_up_Conv2d_21/pointwise_weights',
'MobilenetV2/fpn/bottom_up_Conv2d_21/depthwise_weights',
'MobilenetV2/fpn/smoothing_1/depthwise_weights',
'MobilenetV2/fpn/smoothing_1/pointwise_weights',
'MobilenetV2/fpn/smoothing_2/depthwise_weights',
'MobilenetV2/fpn/smoothing_2/pointwise_weights',
'MobilenetV2/fpn/projection_1/weights',
'MobilenetV2/fpn/projection_2/weights',
'MobilenetV2/fpn/projection_3/weights',
])
g = tf.Graph()
with g.as_default():
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
_ = feature_extractor.extract_features(preprocessed_inputs)
expected_feature_maps_variables = slim_expected_feature_maps_variables
if use_depthwise:
expected_feature_maps_variables = (
slim_expected_feature_maps_variables_with_depthwise)
actual_variable_set = set([
var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
])
variable_intersection = expected_feature_maps_variables.intersection(
actual_variable_set)
self.assertSetEqual(expected_feature_maps_variables,
variable_intersection)
if __name__ == '__main__':
tf.test.main()
| 14,559 | 34.773956 | 80 | py |
models | models-master/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnetv1 FPN [1] based feature extractors for CenterNet[2] meta architecture.
[1]: https://arxiv.org/abs/1612.03144.
[2]: https://arxiv.org/abs/1904.07850.
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor
from object_detection.models.keras_models import resnet_v1
_RESNET_MODEL_OUTPUT_LAYERS = {
'resnet_v1_18': ['conv2_block2_out', 'conv3_block2_out',
'conv4_block2_out', 'conv5_block2_out'],
'resnet_v1_34': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block6_out', 'conv5_block3_out'],
'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block6_out', 'conv5_block3_out'],
'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block23_out', 'conv5_block3_out'],
}
class CenterNetResnetV1FpnFeatureExtractor(CenterNetFeatureExtractor):
"""Resnet v1 FPN base feature extractor for the CenterNet model.
This feature extractor uses residual skip connections and nearest neighbor
upsampling to produce an output feature map of stride 4, which has precise
localization information along with strong semantic information from the top
of the net. This design does not exactly follow the original FPN design,
specifically:
- Since only one output map is necessary for heatmap prediction (stride 4
output), the top-down feature maps can have different numbers of channels.
Specifically, the top down feature maps have the following sizes:
[h/4, w/4, 64], [h/8, w/8, 128], [h/16, w/16, 256], [h/32, w/32, 256].
- No additional coarse features are used after conv5_x.
"""
def __init__(self, resnet_type, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Initializes the feature extractor with a specific ResNet architecture.
Args:
      resnet_type: A string specifying which kind of ResNet to use. Currently
        `resnet_v1_18`, `resnet_v1_34`, `resnet_v1_50` and `resnet_v1_101` are
        supported.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
bgr_ordering: bool, if set will change the channel ordering to be in the
[blue, red, green] order.
"""
super(CenterNetResnetV1FpnFeatureExtractor, self).__init__(
channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
if resnet_type == 'resnet_v1_50':
self._base_model = tf.keras.applications.ResNet50(weights=None,
include_top=False)
elif resnet_type == 'resnet_v1_101':
self._base_model = tf.keras.applications.ResNet101(weights=None,
include_top=False)
elif resnet_type == 'resnet_v1_18':
self._base_model = resnet_v1.resnet_v1_18(weights=None, include_top=False)
elif resnet_type == 'resnet_v1_34':
self._base_model = resnet_v1.resnet_v1_34(weights=None, include_top=False)
else:
raise ValueError('Unknown Resnet Model {}'.format(resnet_type))
output_layers = _RESNET_MODEL_OUTPUT_LAYERS[resnet_type]
outputs = [self._base_model.get_layer(output_layer_name).output
for output_layer_name in output_layers]
self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input,
outputs=outputs)
resnet_outputs = self._resnet_model(self._base_model.input)
# Construct the top-down feature maps.
top_layer = resnet_outputs[-1]
residual_op = tf.keras.layers.Conv2D(filters=256, kernel_size=1,
strides=1, padding='same')
top_down = residual_op(top_layer)
num_filters_list = [256, 128, 64]
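    # Channel widths of the lateral 1x1 convolutions used when merging with
    # the stride-16, stride-8 and stride-4 backbone outputs; after the last
    # merge the top-down map has 64 channels at stride 4, matching the class
    # docstring.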
for i, num_filters in enumerate(num_filters_list):
level_ind = 2 - i
# Upsample.
upsample_op = tf.keras.layers.UpSampling2D(2, interpolation='nearest')
top_down = upsample_op(top_down)
# Residual (skip-connection) from bottom-up pathway.
residual_op = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=1,
strides=1, padding='same')
residual = residual_op(resnet_outputs[level_ind])
# Merge.
top_down = top_down + residual
next_num_filters = num_filters_list[i+1] if i + 1 <= 2 else 64
conv = tf.keras.layers.Conv2D(filters=next_num_filters,
kernel_size=3, strides=1, padding='same')
top_down = conv(top_down)
top_down = tf.keras.layers.BatchNormalization()(top_down)
top_down = tf.keras.layers.ReLU()(top_down)
self._feature_extractor_model = tf.keras.models.Model(
inputs=self._base_model.input, outputs=top_down)
def preprocess(self, resized_inputs):
"""Preprocess input images for the ResNet model.
    This applies the Keras ResNet (Caffe-style) preprocessing to images in the
    [0, 255] range: channels are converted from RGB to BGR and the ImageNet
    per-channel means are subtracted.
Args:
resized_inputs: a [batch, height, width, channels] float32 tensor.
Returns:
outputs: a [batch, height, width, channels] float32 tensor.
"""
resized_inputs = super(
CenterNetResnetV1FpnFeatureExtractor, self).preprocess(resized_inputs)
return tf.keras.applications.resnet.preprocess_input(resized_inputs)
def load_feature_extractor_weights(self, path):
self._base_model.load_weights(path)
def call(self, inputs):
"""Returns image features extracted by the backbone.
Args:
inputs: An image tensor of shape [batch_size, input_height,
input_width, 3]
Returns:
features_list: A list of length 1 containing a tensor of shape
[batch_size, input_height // 4, input_width // 4, 64] containing
the features extracted by the ResNet.
"""
return [self._feature_extractor_model(inputs)]
@property
def num_feature_outputs(self):
return 1
@property
def out_stride(self):
return 4
@property
def classification_backbone(self):
return self._base_model
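# Illustrative usage sketch, not part of the original module: it shows how the
# stride-4, 64-channel output described in the class docstring appears in
# practice. The function name and the 512x512 input size are arbitrary example
# choices; the function is not called anywhere in this file.
def _example_output_shape_sketch():
  """Returns the shape of the single feature map produced by the extractor."""
  extractor = CenterNetResnetV1FpnFeatureExtractor(resnet_type='resnet_v1_18')
  images = tf.zeros([1, 512, 512, 3], dtype=tf.float32)
  # `call` returns a list with one tensor of shape [1, 128, 128, 64]: the
  # 512x512 input divided by out_stride (4), with 64 output channels.
  features = extractor(extractor.preprocess(images))
  return features[0].shape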
def resnet_v1_101_fpn(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v1 101 FPN feature extractor."""
del kwargs
return CenterNetResnetV1FpnFeatureExtractor(
resnet_type='resnet_v1_101',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering
)
def resnet_v1_50_fpn(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v1 50 FPN feature extractor."""
del kwargs
return CenterNetResnetV1FpnFeatureExtractor(
resnet_type='resnet_v1_50',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def resnet_v1_34_fpn(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v1 34 FPN feature extractor."""
del kwargs
return CenterNetResnetV1FpnFeatureExtractor(
resnet_type='resnet_v1_34',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering
)
def resnet_v1_18_fpn(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v1 18 FPN feature extractor."""
del kwargs
return CenterNetResnetV1FpnFeatureExtractor(
resnet_type='resnet_v1_18',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
| 8,230 | 37.643192 | 94 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_fpn_keras_feature_extractor.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Keras-based ResnetV1 FPN Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import resnet_v1
from object_detection.utils import ops
from object_detection.utils import shape_utils
_RESNET_MODEL_OUTPUT_LAYERS = {
'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block6_out', 'conv5_block3_out'],
'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block23_out', 'conv5_block3_out'],
'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out',
'conv4_block36_out', 'conv5_block3_out'],
}
class SSDResNetV1FpnKerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based ResnetV1 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
resnet_v1_base_model,
resnet_v1_base_model_name,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=False,
name=None):
"""SSD Keras based FPN feature extractor Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
resnet_v1_base_model: base resnet v1 network to use. One of
the resnet_v1.resnet_v1_{50,101,152} models.
resnet_v1_base_model_name: model name under which to construct resnet v1.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4}
respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
FPN. FPN constructions uses features maps starting from fpn_min_level
upto the fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: whether to use explicit padding when extracting
features. Default is None, as it's an invalid option and not implemented
in this feature extractor.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDResNetV1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
if self._use_explicit_padding:
raise ValueError('Explicit padding is not a valid option.')
if self._use_depthwise:
raise ValueError('Depthwise is not a valid option.')
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._resnet_v1_base_model = resnet_v1_base_model
self._resnet_v1_base_model_name = resnet_v1_base_model_name
self._resnet_block_names = ['block1', 'block2', 'block3', 'block4']
self.classification_backbone = None
self._fpn_features_generator = None
self._coarse_feature_layers = []
def build(self, input_shape):
full_resnet_v1_model = self._resnet_v1_base_model(
batchnorm_training=(self._is_training and not self._freeze_batchnorm),
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
classes=None,
weights=None,
include_top=False)
output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name]
outputs = [full_resnet_v1_model.get_layer(output_layer_name).output
for output_layer_name in output_layers]
self.classification_backbone = tf.keras.Model(
inputs=full_resnet_v1_model.inputs,
outputs=outputs)
# pylint:disable=g-long-lambda
self._depth_fn = lambda d: max(
int(d * self._depth_multiplier), self._min_depth)
self._base_fpn_max_level = min(self._fpn_max_level, 5)
self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
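    # With the default fpn_min_level=3 and fpn_max_level=7 this gives
    # _base_fpn_max_level = 5 and _num_levels = 3: levels 3-5 (strides 8, 16
    # and 32) come from the top-down pathway, while levels 6-7 are produced
    # later by the stride-2 `bottom_up_block` convolutions.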
self._fpn_features_generator = (
feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=self._num_levels,
depth=self._depth_fn(self._additional_layer_depth),
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
# Construct coarse feature layers
depth = self._depth_fn(self._additional_layer_depth)
for i in range(self._base_fpn_max_level, self._fpn_max_level):
layers = []
layer_name = 'bottom_up_block{}'.format(i)
layers.append(
tf.keras.layers.Conv2D(
depth,
[3, 3],
padding='SAME',
strides=2,
name=layer_name + '_conv',
**self._conv_hyperparams.params()))
layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name=layer_name + '_batchnorm'))
layers.append(
self._conv_hyperparams.build_activation_layer(
name=layer_name))
self._coarse_feature_layers.append(layers)
self.built = True
def preprocess(self, resized_inputs):
"""SSD preprocessing.
VGG style channel mean subtraction as described here:
    https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md.
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
129, preprocessed_inputs)
image_features = self.classification_backbone(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
feature_block_list = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_block_list.append('block{}'.format(level - 1))
feature_block_map = dict(
list(zip(self._resnet_block_names, image_features)))
fpn_input_image_features = [
(feature_block, feature_block_map[feature_block])
for feature_block in feature_block_list]
fpn_features = self._fpn_features_generator(fpn_input_image_features)
feature_maps = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_block{}'.format(level-1)])
last_feature_map = fpn_features['top_down_block{}'.format(
self._base_fpn_max_level - 1)]
for coarse_feature_layers in self._coarse_feature_layers:
for layer in coarse_feature_layers:
last_feature_map = layer(last_feature_map)
feature_maps.append(last_feature_map)
return feature_maps
class SSDResNet50V1FpnKerasFeatureExtractor(
SSDResNetV1FpnKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based ResnetV1-50 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=False,
name='ResNet50V1_FPN'):
"""SSD Keras based FPN feature extractor ResnetV1-50 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: whether to use explicit padding when extracting
features. Default is None, as it's an invalid option and not implemented
in this feature extractor.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDResNet50V1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
resnet_v1_base_model=resnet_v1.resnet_v1_50,
resnet_v1_base_model_name='resnet_v1_50',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDResNet101V1FpnKerasFeatureExtractor(
SSDResNetV1FpnKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based ResnetV1-101 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=False,
name='ResNet101V1_FPN'):
"""SSD Keras based FPN feature extractor ResnetV1-101 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: whether to use explicit padding when extracting
features. Default is None, as it's an invalid option and not implemented
in this feature extractor.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDResNet101V1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
resnet_v1_base_model=resnet_v1.resnet_v1_101,
resnet_v1_base_model_name='resnet_v1_101',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDResNet152V1FpnKerasFeatureExtractor(
SSDResNetV1FpnKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based ResnetV1-152 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=None,
override_base_feature_extractor_hyperparams=False,
name='ResNet152V1_FPN'):
"""SSD Keras based FPN feature extractor ResnetV1-152 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: whether to use explicit padding when extracting
features. Default is None, as it's an invalid option and not implemented
in this feature extractor.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDResNet152V1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
resnet_v1_base_model=resnet_v1.resnet_v1_152,
resnet_v1_base_model_name='resnet_v1_152',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
| 20,749 | 44.404814 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v3_feature_extractor_tf1_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v3_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_mobilenet_v3_feature_extractor
from object_detection.models import ssd_mobilenet_v3_feature_extractor_testbase
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV3LargeFeatureExtractorTest(
ssd_mobilenet_v3_feature_extractor_testbase
._SsdMobilenetV3FeatureExtractorTestBase):
def _get_input_sizes(self):
"""Return first two input feature map sizes."""
return [672, 480]
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
use_keras=False):
"""Constructs a new Mobilenet V3-Large feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return (
ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3LargeFeatureExtractor(
False,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV3SmallFeatureExtractorTest(
ssd_mobilenet_v3_feature_extractor_testbase
._SsdMobilenetV3FeatureExtractorTestBase):
def _get_input_sizes(self):
"""Return first two input feature map sizes."""
return [288, 288]
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
use_keras=False):
"""Constructs a new Mobilenet V3-Small feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return (
ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3SmallFeatureExtractor(
False,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
if __name__ == '__main__':
tf.test.main()
| 3,983 | 36.584906 | 80 | py |
models | models-master/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_resnet_v1_fpn_keras_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_res_fpn
from object_detection.protos import hyperparams_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FasterRCNNResnetV1FpnKerasFeatureExtractorTest(tf.test.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _build_feature_extractor(self):
return frcnn_res_fpn.FasterRCNNResnet50FpnKerasFeatureExtractor(
is_training=False,
conv_hyperparams=self._build_conv_hyperparams(),
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[2, 448, 448, 3], maxval=255, dtype=tf.float32)
rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shapes = [tf.shape(rpn_feature_map)
for rpn_feature_map in rpn_feature_maps]
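    # Five FPN levels with strides 4, 8, 16, 32 and 64; dividing the 448x448
    # input by each stride gives the spatial sizes asserted below.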
self.assertAllEqual(features_shapes[0].numpy(), [2, 112, 112, 256])
self.assertAllEqual(features_shapes[1].numpy(), [2, 56, 56, 256])
self.assertAllEqual(features_shapes[2].numpy(), [2, 28, 28, 256])
self.assertAllEqual(features_shapes[3].numpy(), [2, 14, 14, 256])
self.assertAllEqual(features_shapes[4].numpy(), [2, 7, 7, 256])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[2, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shapes = [tf.shape(rpn_feature_map)
for rpn_feature_map in rpn_feature_maps]
self.assertAllEqual(features_shapes[0].numpy(), [2, 56, 56, 256])
self.assertAllEqual(features_shapes[1].numpy(), [2, 28, 28, 256])
self.assertAllEqual(features_shapes[2].numpy(), [2, 14, 14, 256])
self.assertAllEqual(features_shapes[3].numpy(), [2, 7, 7, 256])
self.assertAllEqual(features_shapes[4].numpy(), [2, 4, 4, 256])
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor()
proposal_feature_maps = tf.random_uniform(
[3, 7, 7, 1024], maxval=255, dtype=tf.float32)
model = feature_extractor.get_box_classifier_feature_extractor_model(
name='TestScope')
proposal_classifier_features = (
model(proposal_feature_maps))
features_shape = tf.shape(proposal_classifier_features)
self.assertAllEqual(features_shape.numpy(), [3, 1, 1, 1024])
| 4,117 | 42.347368 | 102 | py |
models | models-master/research/object_detection/models/ssd_mobiledet_feature_extractor_tf1_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobiledet_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobiledet_feature_extractor
from object_detection.utils import tf_version
try:
from tensorflow.contrib import quantize as contrib_quantize # pylint: disable=g-import-not-at-top
except: # pylint: disable=bare-except
pass
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDMobileDetFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
feature_extractor_cls,
is_training=False,
depth_multiplier=1.0,
pad_to_multiple=1,
use_explicit_padding=False,
use_keras=False):
"""Constructs a new MobileDet feature extractor.
Args:
feature_extractor_cls: feature extractor class.
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: If True, we will use 'VALID' padding for
convolutions, but prepad inputs so that the output dimensions are the
same as if 'SAME' padding were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
      an ssd_meta_arch.SSDFeatureExtractor subclass for the requested
      MobileDet variant.
"""
min_depth = 32
return feature_extractor_cls(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding)
def test_mobiledet_cpu_returns_correct_shapes(self):
expected_feature_map_shapes = [(2, 40, 20, 72),
(2, 20, 10, 144),
(2, 10, 5, 512),
(2, 5, 3, 256),
(2, 3, 2, 256),
(2, 2, 1, 128)]
feature_extractor = self._create_feature_extractor(
ssd_mobiledet_feature_extractor.SSDMobileDetCPUFeatureExtractor)
image = tf.random.normal((2, 640, 320, 3))
feature_maps = feature_extractor.extract_features(image)
self.assertEqual(len(expected_feature_map_shapes), len(feature_maps))
for expected_shape, x in zip(expected_feature_map_shapes, feature_maps):
self.assertTrue(x.shape.is_compatible_with(expected_shape))
def test_mobiledet_dsp_returns_correct_shapes(self):
expected_feature_map_shapes = [(2, 40, 20, 144),
(2, 20, 10, 240),
(2, 10, 5, 512),
(2, 5, 3, 256),
(2, 3, 2, 256),
(2, 2, 1, 128)]
feature_extractor = self._create_feature_extractor(
ssd_mobiledet_feature_extractor.SSDMobileDetDSPFeatureExtractor)
image = tf.random.normal((2, 640, 320, 3))
feature_maps = feature_extractor.extract_features(image)
self.assertEqual(len(expected_feature_map_shapes), len(feature_maps))
for expected_shape, x in zip(expected_feature_map_shapes, feature_maps):
self.assertTrue(x.shape.is_compatible_with(expected_shape))
def test_mobiledet_edgetpu_returns_correct_shapes(self):
expected_feature_map_shapes = [(2, 40, 20, 96),
(2, 20, 10, 384),
(2, 10, 5, 512),
(2, 5, 3, 256),
(2, 3, 2, 256),
(2, 2, 1, 128)]
feature_extractor = self._create_feature_extractor(
ssd_mobiledet_feature_extractor.SSDMobileDetEdgeTPUFeatureExtractor)
image = tf.random.normal((2, 640, 320, 3))
feature_maps = feature_extractor.extract_features(image)
self.assertEqual(len(expected_feature_map_shapes), len(feature_maps))
for expected_shape, x in zip(expected_feature_map_shapes, feature_maps):
self.assertTrue(x.shape.is_compatible_with(expected_shape))
def test_mobiledet_gpu_returns_correct_shapes(self):
expected_feature_map_shapes = [(2, 40, 20, 128), (2, 20, 10, 384),
(2, 10, 5, 512), (2, 5, 3, 256),
(2, 3, 2, 256), (2, 2, 1, 128)]
feature_extractor = self._create_feature_extractor(
ssd_mobiledet_feature_extractor.SSDMobileDetGPUFeatureExtractor)
image = tf.random.normal((2, 640, 320, 3))
feature_maps = feature_extractor.extract_features(image)
self.assertEqual(len(expected_feature_map_shapes), len(feature_maps))
for expected_shape, x in zip(expected_feature_map_shapes, feature_maps):
self.assertTrue(x.shape.is_compatible_with(expected_shape))
def _check_quantization(self, model_fn):
checkpoint_dir = self.get_temp_dir()
with tf.Graph().as_default() as training_graph:
model_fn(is_training=True)
contrib_quantize.experimental_create_training_graph(training_graph)
with self.session(graph=training_graph) as sess:
sess.run(tf.global_variables_initializer())
tf.train.Saver().save(sess, checkpoint_dir)
with tf.Graph().as_default() as eval_graph:
model_fn(is_training=False)
contrib_quantize.experimental_create_eval_graph(eval_graph)
with self.session(graph=eval_graph) as sess:
tf.train.Saver().restore(sess, checkpoint_dir)
def test_mobiledet_cpu_quantization(self):
def model_fn(is_training):
feature_extractor = self._create_feature_extractor(
ssd_mobiledet_feature_extractor.SSDMobileDetCPUFeatureExtractor,
is_training=is_training)
image = tf.random.normal((2, 320, 320, 3))
feature_extractor.extract_features(image)
self._check_quantization(model_fn)
def test_mobiledet_dsp_quantization(self):
def model_fn(is_training):
feature_extractor = self._create_feature_extractor(
ssd_mobiledet_feature_extractor.SSDMobileDetDSPFeatureExtractor,
is_training=is_training)
image = tf.random.normal((2, 320, 320, 3))
feature_extractor.extract_features(image)
self._check_quantization(model_fn)
def test_mobiledet_edgetpu_quantization(self):
def model_fn(is_training):
feature_extractor = self._create_feature_extractor(
ssd_mobiledet_feature_extractor.SSDMobileDetEdgeTPUFeatureExtractor,
is_training=is_training)
image = tf.random.normal((2, 320, 320, 3))
feature_extractor.extract_features(image)
self._check_quantization(model_fn)
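# Usage sketch (illustrative only, not part of the original test): building a
# MobileDet-CPU feature extractor directly, mirroring what
# `_create_feature_extractor` above does with positional arguments.
# `conv_hyperparams_fn` stands in for a slim arg_scope builder such as the one
# provided by the test base class; it is an assumption of this sketch.
def _example_build_mobiledet_cpu(conv_hyperparams_fn): # pragma: no cover
feature_extractor = (
ssd_mobiledet_feature_extractor.SSDMobileDetCPUFeatureExtractor(
False, # is_training
1.0, # depth_multiplier
32, # min_depth
1, # pad_to_multiple
conv_hyperparams_fn))
images = tf.random.normal((1, 320, 320, 3))
return feature_extractor.extract_features(images)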
if __name__ == '__main__':
tf.test.main()
| 7,657 | 43.265896 | 100 | py |
models | models-master/research/object_detection/models/keras_models/hourglass_network_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing the Hourglass network."""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models.keras_models import hourglass_network as hourglass
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class HourglassFeatureExtractorTest(tf.test.TestCase, parameterized.TestCase):
def test_identity_layer(self):
layer = hourglass.IdentityLayer()
output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32))
self.assertEqual(output.shape, (2, 32, 32, 3))
def test_skip_conv_layer_stride_1(self):
layer = hourglass.SkipConvolution(out_channels=8, stride=1)
output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32))
self.assertEqual(output.shape, (2, 32, 32, 8))
def test_skip_conv_layer_stride_2(self):
layer = hourglass.SkipConvolution(out_channels=8, stride=2)
output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32))
self.assertEqual(output.shape, (2, 16, 16, 8))
@parameterized.parameters([{'kernel_size': 1},
{'kernel_size': 3},
{'kernel_size': 7}])
def test_conv_block(self, kernel_size):
layer = hourglass.ConvolutionalBlock(
out_channels=8, kernel_size=kernel_size, stride=1)
output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32))
self.assertEqual(output.shape, (2, 32, 32, 8))
layer = hourglass.ConvolutionalBlock(
out_channels=8, kernel_size=kernel_size, stride=2)
output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32))
self.assertEqual(output.shape, (2, 16, 16, 8))
def test_residual_block_stride_1(self):
layer = hourglass.ResidualBlock(out_channels=8, stride=1)
output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32))
self.assertEqual(output.shape, (2, 32, 32, 8))
def test_residual_block_stride_2(self):
layer = hourglass.ResidualBlock(out_channels=8, stride=2,
skip_conv=True)
output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32))
self.assertEqual(output.shape, (2, 16, 16, 8))
def test_input_downsample_block(self):
layer = hourglass.InputDownsampleBlock(
out_channels_initial_conv=4, out_channels_residual_block=8)
output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32))
self.assertEqual(output.shape, (2, 8, 8, 8))
def test_input_conv_block(self):
layer = hourglass.InputConvBlock(
out_channels_initial_conv=4, out_channels_residual_block=8)
output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32))
self.assertEqual(output.shape, (2, 32, 32, 8))
def test_encoder_decoder_block(self):
layer = hourglass.EncoderDecoderBlock(
num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6],
channel_dims=[4, 6, 8, 10, 12])
output = layer(np.zeros((2, 64, 64, 4), dtype=np.float32))
self.assertEqual(output.shape, (2, 64, 64, 4))
def test_hourglass_feature_extractor(self):
model = hourglass.HourglassNetwork(
num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], input_channel_dims=4,
channel_dims_per_stage=[6, 8, 10, 12, 14], num_hourglasses=2)
outputs = model(np.zeros((2, 64, 64, 3), dtype=np.float32))
self.assertEqual(outputs[0].shape, (2, 16, 16, 6))
self.assertEqual(outputs[1].shape, (2, 16, 16, 6))
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class HourglassDepthTest(tf.test.TestCase):
def test_hourglass_104(self):
net = hourglass.hourglass_104()
self.assertEqual(hourglass.hourglass_depth(net), 104)
def test_hourglass_10(self):
net = hourglass.hourglass_10(2, initial_downsample=False)
self.assertEqual(hourglass.hourglass_depth(net), 10)
outputs = net(tf.zeros((2, 32, 32, 3)))
self.assertEqual(outputs[0].shape, (2, 32, 32, 4))
def test_hourglass_20(self):
net = hourglass.hourglass_20(2, initial_downsample=False)
self.assertEqual(hourglass.hourglass_depth(net), 20)
outputs = net(tf.zeros((2, 32, 32, 3)))
self.assertEqual(outputs[0].shape, (2, 32, 32, 4))
def test_hourglass_32(self):
net = hourglass.hourglass_32(2, initial_downsample=False)
self.assertEqual(hourglass.hourglass_depth(net), 32)
outputs = net(tf.zeros((2, 32, 32, 3)))
self.assertEqual(outputs[0].shape, (2, 32, 32, 4))
def test_hourglass_52(self):
net = hourglass.hourglass_52(2, initial_downsample=False)
self.assertEqual(hourglass.hourglass_depth(net), 52)
outputs = net(tf.zeros((2, 32, 32, 3)))
self.assertEqual(outputs[0].shape, (2, 32, 32, 4))
def test_hourglass_20_uniform_size(self):
net = hourglass.hourglass_20_uniform_size(2)
self.assertEqual(hourglass.hourglass_depth(net), 20)
outputs = net(tf.zeros((2, 32, 32, 3)))
self.assertEqual(outputs[0].shape, (2, 32, 32, 4))
def test_hourglass_100(self):
net = hourglass.hourglass_100(2, initial_downsample=False)
self.assertEqual(hourglass.hourglass_depth(net), 100)
outputs = net(tf.zeros((2, 32, 32, 3)))
self.assertEqual(outputs[0].shape, (2, 32, 32, 4))
if __name__ == '__main__':
tf.test.main()
| 5,882 | 36 | 80 | py |
models | models-master/research/object_detection/models/keras_models/mobilenet_v2.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper around the MobileNet v2 models for Keras, for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.core import freezable_batch_norm
from object_detection.models.keras_models import model_utils
from object_detection.utils import ops
# pylint: disable=invalid-name
# This method copied from the slim mobilenet base network code (same license)
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
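# Worked examples (illustrative, not part of the original module): with
# alpha=0.35 a 32-filter layer is first rounded down to 8, but 8 < 0.9 * 11.2,
# so it is bumped up one divisor step to 16; values already on the divisor
# grid pass through unchanged, and `min_value` acts as a hard floor.
def _example_make_divisible(): # pragma: no cover
assert _make_divisible(32 * 0.35, 8) == 16 # rounded up to limit the drop
assert _make_divisible(1280 * 0.5, 8) == 640 # already a multiple of 8
assert _make_divisible(10, 8, min_value=16) == 16 # min_value floor wins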
class _LayersOverride(object):
"""Alternative Keras layers interface for the Keras MobileNetV2."""
def __init__(self,
batchnorm_training,
default_batchnorm_momentum=0.999,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None,
conv_defs=None):
"""Alternative tf.keras.layers interface, for use by the Keras MobileNetV2.
It is used by the Keras applications kwargs injection API to
modify the Mobilenet v2 Keras application with changes required by
the Object Detection API.
These injected interfaces make the following changes to the network:
- Applies the Object Detection hyperparameter configuration
- Supports FreezableBatchNorms
- Adds support for a min number of filters for each layer
- Makes the `alpha` parameter affect the final convolution block even if it
is less than 1.0
- Adds support for explicit padding of convolutions
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default mobilenet_v2 layer builders.
use_explicit_padding: If True, use 'valid' padding for convolutions,
but explicitly pre-pads inputs so that the output dimensions are the
same as if 'same' padding were used. Off by default.
alpha: The width multiplier referenced in the MobileNetV2 paper. It
modifies the number of filters in each convolutional layer.
min_depth: Minimum number of filters in the convolutional layers.
conv_defs: Network layout to specify the mobilenet_v2 body. Default is
`None` to use the default mobilenet_v2 network layout.
"""
self._alpha = alpha
self._batchnorm_training = batchnorm_training
self._default_batchnorm_momentum = default_batchnorm_momentum
self._conv_hyperparams = conv_hyperparams
self._use_explicit_padding = use_explicit_padding
self._min_depth = min_depth
self._conv_defs = conv_defs
self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5)
self.initializer = tf.truncated_normal_initializer(stddev=0.09)
def _FixedPaddingLayer(self, kernel_size):
return tf.keras.layers.Lambda(lambda x: ops.fixed_padding(x, kernel_size))
def Conv2D(self, filters, **kwargs):
"""Builds a Conv2D layer according to the current Object Detection config.
Overrides the Keras MobileNetV2 application's convolutions with ones that
follow the spec specified by the Object Detection hyperparameters.
Args:
filters: The number of filters to use for the convolution.
**kwargs: Keyword args specified by the Keras application for
constructing the convolution.
Returns:
A one-arg callable that will either directly apply a Keras Conv2D layer to
the input argument, or that will first pad the input then apply a Conv2D
layer.
"""
# Make sure 'alpha' is always applied to the last convolution block's size
# (This overrides the Keras application's functionality)
layer_name = kwargs.get('name')
if layer_name == 'Conv_1':
if self._conv_defs:
filters = model_utils.get_conv_def(self._conv_defs, 'Conv_1')
else:
filters = 1280
if self._alpha < 1.0:
filters = _make_divisible(filters * self._alpha, 8)
# Apply the minimum depth to the convolution layers
if (self._min_depth and (filters < self._min_depth)
and not kwargs.get('name').endswith('expand')):
filters = self._min_depth
if self._conv_hyperparams:
kwargs = self._conv_hyperparams.params(**kwargs)
else:
kwargs['kernel_regularizer'] = self.regularizer
kwargs['kernel_initializer'] = self.initializer
kwargs['padding'] = 'same'
kernel_size = kwargs.get('kernel_size')
if self._use_explicit_padding and kernel_size > 1:
kwargs['padding'] = 'valid'
def padded_conv(features):
padded_features = self._FixedPaddingLayer(kernel_size)(features)
return tf.keras.layers.Conv2D(filters, **kwargs)(padded_features)
return padded_conv
else:
return tf.keras.layers.Conv2D(filters, **kwargs)
def DepthwiseConv2D(self, **kwargs):
"""Builds a DepthwiseConv2D according to the Object Detection config.
Overrides the Keras MobileNetV2 application's convolutions with ones that
follow the spec specified by the Object Detection hyperparameters.
Args:
**kwargs: Keyword args specified by the Keras application for
constructing the convolution.
Returns:
A one-arg callable that will either directly apply a Keras DepthwiseConv2D
layer to the input argument, or that will first pad the input then apply
the depthwise convolution.
"""
if self._conv_hyperparams:
kwargs = self._conv_hyperparams.params(**kwargs)
# Both the regularizer and initializer apply to the depthwise layer in
# MobilenetV1, so we remap the kernel_* to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
else:
kwargs['depthwise_regularizer'] = self.regularizer
kwargs['depthwise_initializer'] = self.initializer
kwargs['padding'] = 'same'
kernel_size = kwargs.get('kernel_size')
if self._use_explicit_padding and kernel_size > 1:
kwargs['padding'] = 'valid'
def padded_depthwise_conv(features):
padded_features = self._FixedPaddingLayer(kernel_size)(features)
return tf.keras.layers.DepthwiseConv2D(**kwargs)(padded_features)
return padded_depthwise_conv
else:
return tf.keras.layers.DepthwiseConv2D(**kwargs)
def BatchNormalization(self, **kwargs):
"""Builds a normalization layer.
Overrides the Keras application batch norm with the norm specified by the
Object Detection configuration.
Args:
**kwargs: Only the name is used, all other params ignored.
Required for matching `layers.BatchNormalization` calls in the Keras
application.
Returns:
A normalization layer specified by the Object Detection hyperparameter
configurations.
"""
name = kwargs.get('name')
if self._conv_hyperparams:
return self._conv_hyperparams.build_batch_norm(
training=self._batchnorm_training,
name=name)
else:
return freezable_batch_norm.FreezableBatchNorm(
training=self._batchnorm_training,
epsilon=1e-3,
momentum=self._default_batchnorm_momentum,
name=name)
def Input(self, shape):
"""Builds an Input layer.
Overrides the Keras application Input layer with one that uses a
tf.placeholder_with_default instead of a tf.placeholder. This is necessary
to ensure the application works when run on a TPU.
Args:
shape: The shape for the input layer to use. (Does not include a dimension
for the batch size).
Returns:
An input layer for the specified shape that internally uses a
placeholder_with_default.
"""
default_size = 224
default_batch_size = 1
shape = list(shape)
default_shape = [default_size if dim is None else dim for dim in shape]
input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape)
placeholder_with_default = tf.placeholder_with_default(
input=input_tensor, shape=[None] + shape)
return model_utils.input_layer(shape, placeholder_with_default)
# pylint: disable=unused-argument
def ReLU(self, *args, **kwargs):
"""Builds an activation layer.
Overrides the Keras application ReLU with the activation specified by the
Object Detection configuration.
Args:
*args: Ignored, required to match the `tf.keras.ReLU` interface
**kwargs: Only the name is used,
required to match `tf.keras.ReLU` interface
Returns:
An activation layer specified by the Object Detection hyperparameter
configurations.
"""
name = kwargs.get('name')
if self._conv_hyperparams:
return self._conv_hyperparams.build_activation_layer(name=name)
else:
return tf.keras.layers.Lambda(tf.nn.relu6, name=name)
# pylint: enable=unused-argument
# pylint: disable=unused-argument
def ZeroPadding2D(self, **kwargs):
"""Replaces explicit padding in the Keras application with a no-op.
Args:
**kwargs: Ignored, required to match the Keras applications usage.
Returns:
A no-op identity lambda.
"""
return lambda x: x
# pylint: enable=unused-argument
# Forward all non-overridden methods to the keras layers
def __getattr__(self, item):
return getattr(tf.keras.layers, item)
def mobilenet_v2(batchnorm_training,
default_batchnorm_momentum=0.9997,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None,
conv_defs=None,
**kwargs):
"""Instantiates the MobileNetV2 architecture, modified for object detection.
This wraps the MobileNetV2 tensorflow Keras application, but uses the
Keras application's kwargs-based monkey-patching API to override the Keras
architecture with the following changes:
- Changes the default batchnorm momentum to 0.9997
- Applies the Object Detection hyperparameter configuration
- Supports FreezableBatchNorms
- Adds support for a min number of filters for each layer
- Makes the `alpha` parameter affect the final convolution block even if it
is less than 1.0
- Adds support for explicit padding of convolutions
- Makes the Input layer use a tf.placeholder_with_default instead of a
tf.placeholder, to work on TPUs.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default mobilenet_v2 layer builders.
use_explicit_padding: If True, use 'valid' padding for convolutions,
but explicitly pre-pads inputs so that the output dimensions are the
same as if 'same' padding were used. Off by default.
alpha: The width multiplier referenced in the MobileNetV2 paper. It
modifies the number of filters in each convolutional layer.
min_depth: Minimum number of filters in the convolutional layers.
conv_defs: Network layout to specify the mobilenet_v2 body. Default is
`None` to use the default mobilenet_v2 network layout.
**kwargs: Keyword arguments forwarded directly to the
      `tf.keras.applications.MobileNetV2` method that constructs the Keras
model.
Returns:
A Keras model instance.
"""
layers_override = _LayersOverride(
batchnorm_training,
default_batchnorm_momentum=default_batchnorm_momentum,
conv_hyperparams=conv_hyperparams,
use_explicit_padding=use_explicit_padding,
min_depth=min_depth,
alpha=alpha,
conv_defs=conv_defs)
return tf.keras.applications.MobileNetV2(alpha=alpha,
layers=layers_override,
**kwargs)
# pylint: enable=invalid-name
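# Usage sketch (illustrative, not part of the original module): extracting
# intermediate endpoints from the wrapped MobileNetV2. 'block_13_expand_relu'
# and 'out_relu' are standard Keras MobileNetV2 layer names (the same names
# exercised by the unit tests for this module).
def _example_mobilenet_v2_endpoints(): # pragma: no cover
full_model = mobilenet_v2(
batchnorm_training=False, alpha=1.0, weights=None, include_top=False)
outputs = [full_model.get_layer(name=name).output
for name in ('block_13_expand_relu', 'out_relu')]
return tf.keras.Model(inputs=full_model.inputs, outputs=outputs)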
| 13,461 | 39.185075 | 80 | py |
models | models-master/research/object_detection/models/keras_models/mobilenet_v2_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models.keras_models import mobilenet_v2
from object_detection.models.keras_models import model_utils
from object_detection.models.keras_models import test_utils
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
_layers_to_check = [
'Conv1_relu',
'block_1_expand_relu', 'block_1_depthwise_relu', 'block_1_project_BN',
'block_2_expand_relu', 'block_2_depthwise_relu', 'block_2_project_BN',
'block_3_expand_relu', 'block_3_depthwise_relu', 'block_3_project_BN',
'block_4_expand_relu', 'block_4_depthwise_relu', 'block_4_project_BN',
'block_5_expand_relu', 'block_5_depthwise_relu', 'block_5_project_BN',
'block_6_expand_relu', 'block_6_depthwise_relu', 'block_6_project_BN',
'block_7_expand_relu', 'block_7_depthwise_relu', 'block_7_project_BN',
'block_8_expand_relu', 'block_8_depthwise_relu', 'block_8_project_BN',
'block_9_expand_relu', 'block_9_depthwise_relu', 'block_9_project_BN',
'block_10_expand_relu', 'block_10_depthwise_relu', 'block_10_project_BN',
'block_11_expand_relu', 'block_11_depthwise_relu', 'block_11_project_BN',
'block_12_expand_relu', 'block_12_depthwise_relu', 'block_12_project_BN',
'block_13_expand_relu', 'block_13_depthwise_relu', 'block_13_project_BN',
'block_14_expand_relu', 'block_14_depthwise_relu', 'block_14_project_BN',
'block_15_expand_relu', 'block_15_depthwise_relu', 'block_15_project_BN',
'block_16_expand_relu', 'block_16_depthwise_relu', 'block_16_project_BN',
'out_relu']
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MobilenetV2Test(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
train: true,
scale: false,
center: true,
decay: 0.2,
epsilon: 0.1,
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _create_application_with_layer_outputs(
self, layer_names, batchnorm_training,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None,
conv_defs=None):
"""Constructs Keras mobilenetv2 that extracts intermediate layer outputs."""
# Have to clear the Keras backend to ensure isolation in layer naming
tf.keras.backend.clear_session()
if not layer_names:
layer_names = _layers_to_check
full_model = mobilenet_v2.mobilenet_v2(
batchnorm_training=batchnorm_training,
conv_hyperparams=conv_hyperparams,
weights=None,
use_explicit_padding=use_explicit_padding,
alpha=alpha,
min_depth=min_depth,
include_top=False,
conv_defs=conv_defs)
layer_outputs = [full_model.get_layer(name=layer).output
for layer in layer_names]
return tf.keras.Model(
inputs=full_model.inputs,
outputs=layer_outputs)
def _check_returns_correct_shape(
self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False, min_depth=None,
layer_names=None, conv_defs=None):
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False,
use_explicit_padding=use_explicit_padding,
min_depth=min_depth,
alpha=depth_multiplier,
conv_defs=conv_defs)
image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32)
feature_maps = model([image_tensor])
for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def _check_returns_correct_shapes_with_dynamic_inputs(
self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False,
layer_names=None):
height = tf.random.uniform([], minval=image_height, maxval=image_height+1,
dtype=tf.int32)
width = tf.random.uniform([], minval=image_width, maxval=image_width+1,
dtype=tf.int32)
image_tensor = tf.random.uniform([batch_size, height, width,
3], dtype=tf.float32)
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
alpha=depth_multiplier)
feature_maps = model(image_tensor)
for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def _get_variables(self, depth_multiplier, layer_names=None):
tf.keras.backend.clear_session()
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=False,
alpha=depth_multiplier)
preprocessed_inputs = tf.random.uniform([2, 40, 40, 3])
model(preprocessed_inputs)
return model.variables
def test_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.moblenet_v2_expected_feature_map_shape_128)
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_128_explicit_padding(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.moblenet_v2_expected_feature_map_shape_128_explicit_padding)
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape, use_explicit_padding=True)
def test_returns_correct_shapes_with_dynamic_inputs(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.mobilenet_v2_expected_feature_map_shape_with_dynamic_inputs)
self._check_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.moblenet_v2_expected_feature_map_shape_299)
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
expected_feature_map_shape = (
test_utils.moblenet_v2_expected_feature_map_shape_enforcing_min_depth)
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape, min_depth=32)
def test_returns_correct_shapes_with_conv_defs(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
conv_1 = model_utils.ConvDefs(
conv_name='Conv_1', filters=256)
conv_defs = [conv_1]
expected_feature_map_shape = (
test_utils.moblenet_v2_expected_feature_map_shape_with_conv_defs)
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape, conv_defs=conv_defs)
def test_hyperparam_override(self):
hyperparams = self._build_conv_hyperparams()
model = mobilenet_v2.mobilenet_v2(
batchnorm_training=True,
conv_hyperparams=hyperparams,
weights=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=32,
include_top=False)
hyperparams.params()
bn_layer = model.get_layer(name='block_5_project_BN')
self.assertAllClose(bn_layer.momentum, 0.2)
self.assertAllClose(bn_layer.epsilon, 0.1)
def test_variable_count(self):
depth_multiplier = 1
variables = self._get_variables(depth_multiplier)
self.assertEqual(len(variables), 260)
if __name__ == '__main__':
tf.test.main()
| 9,697 | 37.792 | 80 | py |
models | models-master/research/object_detection/models/keras_models/resnet_v1.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper around the Keras Resnet V1 models for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.applications import resnet
import tensorflow.compat.v1 as tf
from object_detection.core import freezable_batch_norm
from object_detection.models.keras_models import model_utils
def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = tf.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs
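# Worked example (illustrative, not part of the original module): a 7x7 kernel
# needs 6 extra pixels per spatial dimension (3 before, 3 after), so a
# [2, 32, 32, 3] input becomes [2, 38, 38, 3]; with kernel_size=3 and rate=2
# the effective kernel is 5, giving 2 pixels of padding on each side.
def _example_fixed_padding(): # pragma: no cover
features = tf.zeros([2, 32, 32, 3])
return _fixed_padding(features, kernel_size=7) # static shape [2, 38, 38, 3]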
class _LayersOverride(object):
"""Alternative Keras layers interface for the Keras Resnet V1."""
def __init__(self,
batchnorm_training,
batchnorm_scale=True,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5,
weight_decay=0.0001,
conv_hyperparams=None,
min_depth=8,
depth_multiplier=1):
"""Alternative tf.keras.layers interface, for use by the Keras Resnet V1.
The class is used by the Keras applications kwargs injection API to
modify the Resnet V1 Keras application with changes required by
the Object Detection API.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale
the activations in the batch normalization layer.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the epsilon.
weight_decay: The weight decay to use for regularizing the model.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default resnet_v1 layer builders.
min_depth: Minimum number of filters in the convolutional layers.
depth_multiplier: The depth multiplier to modify the number of filters
in the convolutional layers.
"""
self._batchnorm_training = batchnorm_training
self._batchnorm_scale = batchnorm_scale
self._default_batchnorm_momentum = default_batchnorm_momentum
self._default_batchnorm_epsilon = default_batchnorm_epsilon
self._conv_hyperparams = conv_hyperparams
self._min_depth = min_depth
self._depth_multiplier = depth_multiplier
self.regularizer = tf.keras.regularizers.l2(weight_decay)
self.initializer = tf.variance_scaling_initializer()
def _FixedPaddingLayer(self, kernel_size, rate=1): # pylint: disable=invalid-name
return tf.keras.layers.Lambda(
lambda x: _fixed_padding(x, kernel_size, rate))
def Conv2D(self, filters, kernel_size, **kwargs): # pylint: disable=invalid-name
"""Builds a Conv2D layer according to the current Object Detection config.
Overrides the Keras Resnet application's convolutions with ones that
follow the spec specified by the Object Detection hyperparameters.
Args:
filters: The number of filters to use for the convolution.
kernel_size: The kernel size to specify the height and width of the 2D
convolution window.
**kwargs: Keyword args specified by the Keras application for
constructing the convolution.
Returns:
A one-arg callable that will either directly apply a Keras Conv2D layer to
the input argument, or that will first pad the input then apply a Conv2D
layer.
"""
# Apply the minimum depth to the convolution layers.
filters = max(int(filters * self._depth_multiplier), self._min_depth)
if self._conv_hyperparams:
kwargs = self._conv_hyperparams.params(**kwargs)
else:
kwargs['kernel_regularizer'] = self.regularizer
kwargs['kernel_initializer'] = self.initializer
# Set use_bias as false to keep it consistent with Slim Resnet model.
kwargs['use_bias'] = False
kwargs['padding'] = 'same'
stride = kwargs.get('strides')
if stride and kernel_size and stride > 1 and kernel_size > 1:
kwargs['padding'] = 'valid'
def padded_conv(features): # pylint: disable=invalid-name
padded_features = self._FixedPaddingLayer(kernel_size)(features)
return tf.keras.layers.Conv2D(
filters, kernel_size, **kwargs)(padded_features)
return padded_conv
else:
return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs)
def Activation(self, *args, **kwargs): # pylint: disable=unused-argument,invalid-name
"""Builds an activation layer.
Overrides the Keras application Activation layer specified by the
Object Detection configuration.
Args:
*args: Ignored,
required to match the `tf.keras.layers.Activation` interface.
**kwargs: Only the name is used,
required to match `tf.keras.layers.Activation` interface.
Returns:
An activation layer specified by the Object Detection hyperparameter
configurations.
"""
name = kwargs.get('name')
if self._conv_hyperparams:
return self._conv_hyperparams.build_activation_layer(name=name)
else:
return tf.keras.layers.Lambda(tf.nn.relu, name=name)
def BatchNormalization(self, **kwargs): # pylint: disable=invalid-name
"""Builds a normalization layer.
Overrides the Keras application batch norm with the norm specified by the
Object Detection configuration.
Args:
**kwargs: Only the name is used, all other params ignored.
Required for matching `layers.BatchNormalization` calls in the Keras
application.
Returns:
A normalization layer specified by the Object Detection hyperparameter
configurations.
"""
name = kwargs.get('name')
if self._conv_hyperparams:
return self._conv_hyperparams.build_batch_norm(
training=self._batchnorm_training,
name=name)
else:
kwargs['scale'] = self._batchnorm_scale
kwargs['epsilon'] = self._default_batchnorm_epsilon
return freezable_batch_norm.FreezableBatchNorm(
training=self._batchnorm_training,
momentum=self._default_batchnorm_momentum,
**kwargs)
def Input(self, shape): # pylint: disable=invalid-name
"""Builds an Input layer.
Overrides the Keras application Input layer with one that uses a
tf.placeholder_with_default instead of a tf.placeholder. This is necessary
to ensure the application works when run on a TPU.
Args:
shape: A tuple of integers representing the shape of the input, which
        includes both spatial shape and channels, but not the batch size.
Elements of this tuple can be None; 'None' elements represent dimensions
where the shape is not known.
Returns:
An input layer for the specified shape that internally uses a
placeholder_with_default.
"""
default_size = 224
default_batch_size = 1
shape = list(shape)
default_shape = [default_size if dim is None else dim for dim in shape]
input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape)
placeholder_with_default = tf.placeholder_with_default(
input=input_tensor, shape=[None] + shape)
return model_utils.input_layer(shape, placeholder_with_default)
def MaxPooling2D(self, pool_size, **kwargs): # pylint: disable=invalid-name
"""Builds a MaxPooling2D layer with default padding as 'SAME'.
This is specified by the default resnet arg_scope in slim.
Args:
pool_size: The pool size specified by the Keras application.
**kwargs: Ignored, required to match the Keras applications usage.
Returns:
A MaxPooling2D layer with default padding as 'SAME'.
"""
kwargs['padding'] = 'same'
return tf.keras.layers.MaxPooling2D(pool_size, **kwargs)
# Add alias as Keras also has it.
MaxPool2D = MaxPooling2D # pylint: disable=invalid-name
def ZeroPadding2D(self, padding, **kwargs): # pylint: disable=unused-argument,invalid-name
"""Replaces explicit padding in the Keras application with a no-op.
Args:
padding: The padding values for image height and width.
**kwargs: Ignored, required to match the Keras applications usage.
Returns:
A no-op identity lambda.
"""
return lambda x: x
# Forward all non-overridden methods to the keras layers
def __getattr__(self, item):
return getattr(tf.keras.layers, item)
# pylint: disable=invalid-name
def resnet_v1_50(batchnorm_training,
batchnorm_scale=True,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5,
weight_decay=0.0001,
conv_hyperparams=None,
min_depth=8,
depth_multiplier=1,
**kwargs):
"""Instantiates the Resnet50 architecture, modified for object detection.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale
the activations in the batch normalization layer.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the epsilon.
weight_decay: The weight decay to use for regularizing the model.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default resnet_v1 layer builders.
min_depth: Minimum number of filters in the convolutional layers.
depth_multiplier: The depth multiplier to modify the number of filters
in the convolutional layers.
**kwargs: Keyword arguments forwarded directly to the
      `tf.keras.applications.resnet.ResNet50` method that constructs the
      Keras model.
Returns:
A Keras ResnetV1-50 model instance.
"""
layers_override = _LayersOverride(
batchnorm_training,
batchnorm_scale=batchnorm_scale,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
conv_hyperparams=conv_hyperparams,
weight_decay=weight_decay,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
return tf.keras.applications.resnet.ResNet50(
layers=layers_override, **kwargs)
def resnet_v1_101(batchnorm_training,
batchnorm_scale=True,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5,
weight_decay=0.0001,
conv_hyperparams=None,
min_depth=8,
depth_multiplier=1,
**kwargs):
"""Instantiates the Resnet50 architecture, modified for object detection.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale
the activations in the batch normalization layer.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the epsilon.
weight_decay: The weight decay to use for regularizing the model.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default resnet_v1 layer builders.
min_depth: Minimum number of filters in the convolutional layers.
depth_multiplier: The depth multiplier to modify the number of filters
in the convolutional layers.
**kwargs: Keyword arguments forwarded directly to the
      `tf.keras.applications.resnet.ResNet101` method that constructs the
      Keras model.
Returns:
A Keras ResnetV1-101 model instance.
"""
layers_override = _LayersOverride(
batchnorm_training,
batchnorm_scale=batchnorm_scale,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
conv_hyperparams=conv_hyperparams,
weight_decay=weight_decay,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
return tf.keras.applications.resnet.ResNet101(
layers=layers_override, **kwargs)
def resnet_v1_152(batchnorm_training,
batchnorm_scale=True,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5,
weight_decay=0.0001,
conv_hyperparams=None,
min_depth=8,
depth_multiplier=1,
**kwargs):
"""Instantiates the Resnet50 architecture, modified for object detection.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale
the activations in the batch normalization layer.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the epsilon.
weight_decay: The weight decay to use for regularizing the model.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default resnet_v1 layer builders.
min_depth: Minimum number of filters in the convolutional layers.
depth_multiplier: The depth multiplier to modify the number of filters
in the convolutional layers.
**kwargs: Keyword arguments forwarded directly to the
      `tf.keras.applications.resnet.ResNet152` method that constructs the
      Keras model.
Returns:
A Keras ResnetV1-152 model instance.
"""
layers_override = _LayersOverride(
batchnorm_training,
batchnorm_scale=batchnorm_scale,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
conv_hyperparams=conv_hyperparams,
weight_decay=weight_decay,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
return tf.keras.applications.resnet.ResNet152(
layers=layers_override, **kwargs)
# pylint: enable=invalid-name
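# Usage sketch (illustrative, not part of the original module): building a
# detection backbone from the wrapper above. 'conv4_block6_out' and
# 'conv5_block3_out' are the standard Keras ResNet50 layer names; treating
# them as the C4/C5 endpoints here is an assumption of this sketch.
def _example_resnet_v1_50_backbone(): # pragma: no cover
full_model = resnet_v1_50(
batchnorm_training=False, weights=None, include_top=False)
outputs = [full_model.get_layer(name=name).output
for name in ('conv4_block6_out', 'conv5_block3_out')]
return tf.keras.Model(inputs=full_model.inputs, outputs=outputs)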
# The following code is based on the existing Keras ResNet model pattern:
# google3/third_party/py/keras/applications/resnet.py
def block_basic(x,
filters,
kernel_size=3,
stride=1,
conv_shortcut=False,
name=None):
"""A residual block for ResNet18/34.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
conv_shortcut: default False, use convolution shortcut if True, otherwise
identity shortcut.
name: string, block label.
Returns:
Output tensor for the residual block.
"""
layers = tf.keras.layers
bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1
preact = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_preact_bn')(
x)
preact = layers.Activation('relu', name=name + '_preact_relu')(preact)
if conv_shortcut:
shortcut = layers.Conv2D(
filters, 1, strides=1, name=name + '_0_conv')(
preact)
else:
shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x
x = layers.ZeroPadding2D(
padding=((1, 1), (1, 1)), name=name + '_1_pad')(
preact)
x = layers.Conv2D(
filters, kernel_size, strides=1, use_bias=False, name=name + '_1_conv')(
x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(
x)
x = layers.Activation('relu', name=name + '_1_relu')(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
x = layers.Conv2D(
filters,
kernel_size,
strides=stride,
use_bias=False,
name=name + '_2_conv')(
x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(
x)
x = layers.Activation('relu', name=name + '_2_relu')(x)
x = layers.Add(name=name + '_out')([shortcut, x])
return x
def stack_basic(x, filters, blocks, stride1=2, name=None):
"""A set of stacked residual blocks for ResNet18/34.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
name: string, stack label.
Returns:
Output tensor for the stacked blocks.
"""
x = block_basic(x, filters, conv_shortcut=True, name=name + '_block1')
for i in range(2, blocks):
x = block_basic(x, filters, name=name + '_block' + str(i))
x = block_basic(
x, filters, stride=stride1, name=name + '_block' + str(blocks))
return x
def resnet_v1_18(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the ResNet18 architecture."""
def stack_fn(x):
x = stack_basic(x, 64, 2, stride1=1, name='conv2')
x = stack_basic(x, 128, 2, name='conv3')
x = stack_basic(x, 256, 2, name='conv4')
return stack_basic(x, 512, 2, name='conv5')
return resnet.ResNet(
stack_fn,
True,
True,
'resnet18',
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation)
def resnet_v1_34(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the ResNet34 architecture."""
def stack_fn(x):
x = stack_basic(x, 64, 3, stride1=1, name='conv2')
x = stack_basic(x, 128, 4, name='conv3')
x = stack_basic(x, 256, 6, name='conv4')
return stack_basic(x, 512, 3, name='conv5')
return resnet.ResNet(
stack_fn,
True,
True,
'resnet34',
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation)
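# Depth sketch (illustrative, not part of the original module): resnet_v1_18
# stacks (2, 2, 2, 2) basic blocks with two convolutions each, so together
# with the stem convolution and the classification layer the usual count is
# 1 + 2 * (2 + 2 + 2 + 2) + 1 = 18; resnet_v1_34 uses (3, 4, 6, 3) blocks for
# 1 + 2 * (3 + 4 + 6 + 3) + 1 = 34.
def _example_resnet_v1_18(): # pragma: no cover
return resnet_v1_18(include_top=False, weights=None,
input_shape=(224, 224, 3))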
| 20,984 | 37.717712 | 93 | py |
models | models-master/research/object_detection/models/keras_models/hourglass_network.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Hourglass[1] network.
[1]: https://arxiv.org/abs/1603.06937
"""
import tensorflow.compat.v2 as tf
BATCH_NORM_EPSILON = 1e-5
BATCH_NORM_MOMENTUM = 0.1
BATCH_NORM_FUSED = True
class IdentityLayer(tf.keras.layers.Layer):
"""A layer which passes through the input as it is."""
def call(self, inputs):
return inputs
def _get_padding_for_kernel_size(kernel_size):
if kernel_size == 7:
return (3, 3)
elif kernel_size == 3:
return (1, 1)
else:
raise ValueError('Padding for kernel size {} not known.'.format(
kernel_size))
def batchnorm():
try:
return tf.keras.layers.experimental.SyncBatchNormalization(
name='batchnorm', epsilon=1e-5, momentum=0.1)
except AttributeError:
return tf.keras.layers.BatchNormalization(
name='batchnorm', epsilon=1e-5, momentum=0.1, fused=BATCH_NORM_FUSED)
class ConvolutionalBlock(tf.keras.layers.Layer):
"""Block that aggregates Convolution + Norm layer + ReLU."""
def __init__(self, kernel_size, out_channels, stride=1, relu=True,
padding='same'):
"""Initializes the Convolutional block.
Args:
kernel_size: int, convolution kernel size.
out_channels: int, the desired number of output channels.
stride: Integer, stride used in the convolution.
relu: bool, whether to use relu at the end of the layer.
padding: str, the padding scheme to use when kernel_size <= 1
"""
super(ConvolutionalBlock, self).__init__()
if kernel_size > 1:
padding = 'valid'
padding_size = _get_padding_for_kernel_size(kernel_size)
# TODO(vighneshb) Explore if removing and using padding option in conv
# layer works.
self.pad = tf.keras.layers.ZeroPadding2D(padding_size)
else:
self.pad = IdentityLayer()
self.conv = tf.keras.layers.Conv2D(
filters=out_channels, kernel_size=kernel_size, use_bias=False,
strides=stride, padding=padding)
self.norm = batchnorm()
if relu:
self.relu = tf.keras.layers.ReLU()
else:
self.relu = IdentityLayer()
def call(self, inputs):
net = self.pad(inputs)
net = self.conv(net)
net = self.norm(net)
return self.relu(net)
class SkipConvolution(ConvolutionalBlock):
"""The skip connection layer for a ResNet."""
def __init__(self, out_channels, stride):
"""Initializes the skip convolution layer.
Args:
out_channels: int, the desired number of output channels.
stride: int, the stride for the layer.
"""
super(SkipConvolution, self).__init__(
out_channels=out_channels, kernel_size=1, stride=stride, relu=False)
class ResidualBlock(tf.keras.layers.Layer):
"""A Residual block."""
def __init__(self, out_channels, skip_conv=False, kernel_size=3, stride=1,
padding='same'):
"""Initializes the Residual block.
Args:
out_channels: int, the desired number of output channels.
skip_conv: bool, whether to use a conv layer for skip connections.
kernel_size: int, convolution kernel size.
stride: Integer, stride used in the convolution.
padding: str, the type of padding to use.
"""
super(ResidualBlock, self).__init__()
self.conv_block = ConvolutionalBlock(
kernel_size=kernel_size, out_channels=out_channels, stride=stride)
self.conv = tf.keras.layers.Conv2D(
filters=out_channels, kernel_size=kernel_size, use_bias=False,
strides=1, padding=padding)
self.norm = batchnorm()
if skip_conv:
self.skip = SkipConvolution(out_channels=out_channels,
stride=stride)
else:
self.skip = IdentityLayer()
self.relu = tf.keras.layers.ReLU()
def call(self, inputs):
net = self.conv_block(inputs)
net = self.conv(net)
net = self.norm(net)
net_skip = self.skip(inputs)
return self.relu(net + net_skip)
class InputDownsampleBlock(tf.keras.layers.Layer):
"""Block for the initial feature downsampling."""
def __init__(self, out_channels_initial_conv, out_channels_residual_block):
"""Initializes the downsample block.
Args:
out_channels_initial_conv: int, the desired number of output channels
in the initial conv layer.
out_channels_residual_block: int, the desired number of output channels
in the underlying residual block.
"""
super(InputDownsampleBlock, self).__init__()
self.conv_block = ConvolutionalBlock(
kernel_size=7, out_channels=out_channels_initial_conv, stride=2,
padding='valid')
self.residual_block = ResidualBlock(
out_channels=out_channels_residual_block, stride=2, skip_conv=True)
def call(self, inputs):
return self.residual_block(self.conv_block(inputs))
class InputConvBlock(tf.keras.layers.Layer):
"""Block for the initial feature convolution.
This block is used in the hourglass network when we don't want to downsample
the input.
"""
def __init__(self, out_channels_initial_conv, out_channels_residual_block):
"""Initializes the downsample block.
Args:
out_channels_initial_conv: int, the desired number of output channels
in the initial conv layer.
out_channels_residual_block: int, the desired number of output channels
in the underlying residual block.
"""
super(InputConvBlock, self).__init__()
self.conv_block = ConvolutionalBlock(
kernel_size=3, out_channels=out_channels_initial_conv, stride=1,
padding='valid')
self.residual_block = ResidualBlock(
out_channels=out_channels_residual_block, stride=1, skip_conv=True)
def call(self, inputs):
return self.residual_block(self.conv_block(inputs))
def _make_repeated_residual_blocks(out_channels, num_blocks,
initial_stride=1, residual_channels=None,
initial_skip_conv=False):
"""Stack Residual blocks one after the other.
Args:
out_channels: int, the desired number of output channels.
num_blocks: int, the number of residual blocks to be stacked.
initial_stride: int, the stride of the initial residual block.
residual_channels: int, the desired number of output channels in the
      intermediate residual blocks. If not specified, we use out_channels.
initial_skip_conv: bool, if set, the first residual block uses a skip
convolution. This is useful when the number of channels in the input
are not the same as residual_channels.
Returns:
blocks: A list of residual blocks to be applied in sequence.
"""
blocks = []
if residual_channels is None:
residual_channels = out_channels
for i in range(num_blocks - 1):
# Only use the stride at the first block so we don't repeatedly downsample
# the input
stride = initial_stride if i == 0 else 1
    # If the stride is more than 1, we cannot use an identity layer for the
# skip connection and are forced to use a conv for the skip connection.
skip_conv = stride > 1
if i == 0 and initial_skip_conv:
skip_conv = True
blocks.append(
ResidualBlock(out_channels=residual_channels, stride=stride,
skip_conv=skip_conv)
)
if num_blocks == 1:
# If there is only 1 block, the for loop above is not run,
# therefore we honor the requested stride in the last residual block
stride = initial_stride
# We are forced to use a conv in the skip connection if stride > 1
skip_conv = stride > 1
else:
stride = 1
skip_conv = residual_channels != out_channels
blocks.append(ResidualBlock(out_channels=out_channels, skip_conv=skip_conv,
stride=stride))
return blocks
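# Worked example (illustrative, not part of the original module): with
# num_blocks=2, out_channels=256 and initial_stride=2 the helper returns two
# blocks; only the first one downsamples (and therefore needs a conv skip
# connection), while the last one keeps resolution and channel count.
def _example_repeated_residual_blocks(): # pragma: no cover
return _make_repeated_residual_blocks(
out_channels=256, num_blocks=2, initial_stride=2)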
def _apply_blocks(inputs, blocks):
net = inputs
for block in blocks:
net = block(net)
return net
class EncoderDecoderBlock(tf.keras.layers.Layer):
"""An encoder-decoder block which recursively defines the hourglass network."""
def __init__(self, num_stages, channel_dims, blocks_per_stage,
stagewise_downsample=True, encoder_decoder_shortcut=True):
"""Initializes the encoder-decoder block.
Args:
num_stages: int, Number of stages in the network. At each stage we have 2
encoder and 1 decoder blocks. The second encoder block downsamples the
input.
channel_dims: int list, the output channels dimensions of stages in
the network. `channel_dims[0]` is used to define the number of
channels in the first encoder block and `channel_dims[1]` is used to
define the number of channels in the second encoder block. The channels
in the recursive inner layers are defined using `channel_dims[1:]`
blocks_per_stage: int list, number of residual blocks to use at each
stage. `blocks_per_stage[0]` defines the number of blocks at the
current stage and `blocks_per_stage[1:]` is used at further stages.
stagewise_downsample: bool, whether or not to downsample before passing
inputs to the next stage.
encoder_decoder_shortcut: bool, whether or not to use shortcut
connections between encoder and decoder.
"""
super(EncoderDecoderBlock, self).__init__()
out_channels = channel_dims[0]
out_channels_downsampled = channel_dims[1]
self.encoder_decoder_shortcut = encoder_decoder_shortcut
if encoder_decoder_shortcut:
self.merge_features = tf.keras.layers.Add()
self.encoder_block1 = _make_repeated_residual_blocks(
out_channels=out_channels, num_blocks=blocks_per_stage[0],
initial_stride=1)
initial_stride = 2 if stagewise_downsample else 1
self.encoder_block2 = _make_repeated_residual_blocks(
out_channels=out_channels_downsampled,
num_blocks=blocks_per_stage[0], initial_stride=initial_stride,
initial_skip_conv=out_channels != out_channels_downsampled)
if num_stages > 1:
self.inner_block = [
EncoderDecoderBlock(num_stages - 1, channel_dims[1:],
blocks_per_stage[1:],
stagewise_downsample=stagewise_downsample,
encoder_decoder_shortcut=encoder_decoder_shortcut)
]
else:
self.inner_block = _make_repeated_residual_blocks(
out_channels=out_channels_downsampled,
num_blocks=blocks_per_stage[1])
self.decoder_block = _make_repeated_residual_blocks(
residual_channels=out_channels_downsampled,
out_channels=out_channels, num_blocks=blocks_per_stage[0])
self.upsample = tf.keras.layers.UpSampling2D(initial_stride)
def call(self, inputs):
if self.encoder_decoder_shortcut:
encoded_outputs = _apply_blocks(inputs, self.encoder_block1)
encoded_downsampled_outputs = _apply_blocks(inputs, self.encoder_block2)
inner_block_outputs = _apply_blocks(
encoded_downsampled_outputs, self.inner_block)
decoded_outputs = _apply_blocks(inner_block_outputs, self.decoder_block)
upsampled_outputs = self.upsample(decoded_outputs)
if self.encoder_decoder_shortcut:
return self.merge_features([encoded_outputs, upsampled_outputs])
else:
return upsampled_outputs
class HourglassNetwork(tf.keras.Model):
"""The hourglass network."""
def __init__(self, num_stages, input_channel_dims, channel_dims_per_stage,
blocks_per_stage, num_hourglasses, initial_downsample=True,
stagewise_downsample=True, encoder_decoder_shortcut=True):
"""Intializes the feature extractor.
Args:
num_stages: int, Number of stages in the network. At each stage we have 2
encoder and 1 decoder blocks. The second encoder block downsamples the
input.
input_channel_dims: int, the number of channels in the input conv blocks.
channel_dims_per_stage: int list, the output channel dimensions of each
stage in the hourglass network.
blocks_per_stage: int list, number of residual blocks to use at each
stage in the hourglass network
      num_hourglasses: int, number of hourglass networks to stack
sequentially.
initial_downsample: bool, if set, downsamples the input by a factor of 4
before applying the rest of the network. Downsampling is done with a 7x7
convolution kernel, otherwise a 3x3 kernel is used.
stagewise_downsample: bool, whether or not to downsample before passing
inputs to the next stage.
encoder_decoder_shortcut: bool, whether or not to use shortcut
connections between encoder and decoder.
"""
super(HourglassNetwork, self).__init__()
self.num_hourglasses = num_hourglasses
self.initial_downsample = initial_downsample
if initial_downsample:
self.downsample_input = InputDownsampleBlock(
out_channels_initial_conv=input_channel_dims,
out_channels_residual_block=channel_dims_per_stage[0]
)
else:
self.conv_input = InputConvBlock(
out_channels_initial_conv=input_channel_dims,
out_channels_residual_block=channel_dims_per_stage[0]
)
self.hourglass_network = []
self.output_conv = []
for _ in range(self.num_hourglasses):
self.hourglass_network.append(
EncoderDecoderBlock(
num_stages=num_stages, channel_dims=channel_dims_per_stage,
blocks_per_stage=blocks_per_stage,
stagewise_downsample=stagewise_downsample,
encoder_decoder_shortcut=encoder_decoder_shortcut)
)
self.output_conv.append(
ConvolutionalBlock(kernel_size=3,
out_channels=channel_dims_per_stage[0])
)
self.intermediate_conv1 = []
self.intermediate_conv2 = []
self.intermediate_residual = []
for _ in range(self.num_hourglasses - 1):
self.intermediate_conv1.append(
ConvolutionalBlock(
kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False)
)
self.intermediate_conv2.append(
ConvolutionalBlock(
kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False)
)
self.intermediate_residual.append(
ResidualBlock(out_channels=channel_dims_per_stage[0])
)
self.intermediate_relu = tf.keras.layers.ReLU()
def call(self, inputs):
if self.initial_downsample:
inputs = self.downsample_input(inputs)
else:
inputs = self.conv_input(inputs)
outputs = []
for i in range(self.num_hourglasses):
hourglass_output = self.hourglass_network[i](inputs)
output = self.output_conv[i](hourglass_output)
outputs.append(output)
if i < self.num_hourglasses - 1:
secondary_output = (self.intermediate_conv1[i](inputs) +
self.intermediate_conv2[i](output))
secondary_output = self.intermediate_relu(secondary_output)
inputs = self.intermediate_residual[i](secondary_output)
return outputs
@property
def out_stride(self):
"""The stride in the output image of the network."""
return 4
@property
def num_feature_outputs(self):
"""Ther number of feature outputs returned by the feature extractor."""
return self.num_hourglasses
def _layer_depth(layer):
"""Compute depth of Conv/Residual blocks or lists of them."""
if isinstance(layer, list):
return sum([_layer_depth(l) for l in layer])
elif isinstance(layer, ConvolutionalBlock):
return 1
elif isinstance(layer, ResidualBlock):
return 2
else:
raise ValueError('Unknown layer - {}'.format(layer))
def _encoder_decoder_depth(network):
"""Helper function to compute depth of encoder-decoder blocks."""
encoder_block2_layers = _layer_depth(network.encoder_block2)
decoder_block_layers = _layer_depth(network.decoder_block)
if isinstance(network.inner_block[0], EncoderDecoderBlock):
    assert len(network.inner_block) == 1, (
        'Inner block is expected to have length 1.')
inner_block_layers = _encoder_decoder_depth(network.inner_block[0])
return inner_block_layers + encoder_block2_layers + decoder_block_layers
elif isinstance(network.inner_block[0], ResidualBlock):
return (encoder_block2_layers + decoder_block_layers +
_layer_depth(network.inner_block))
else:
raise ValueError('Unknown inner block type.')
def hourglass_depth(network):
"""Helper function to verify depth of hourglass backbone."""
input_conv_layers = 3 # 1 ResidualBlock and 1 ConvBlock
# Only intermediate_conv2 and intermediate_residual are applied before
# sending inputs to the later stages.
intermediate_layers = (
_layer_depth(network.intermediate_conv2) +
_layer_depth(network.intermediate_residual)
)
# network.output_conv is applied before sending input to the later stages
output_layers = _layer_depth(network.output_conv)
encoder_decoder_layers = sum(_encoder_decoder_depth(net) for net in
network.hourglass_network)
return (input_conv_layers + encoder_decoder_layers + intermediate_layers
+ output_layers)
def hourglass_104():
"""The Hourglass-104 backbone.
The architecture parameters are taken from [1].
Returns:
network: An HourglassNetwork object implementing the Hourglass-104
backbone.
[1]: https://arxiv.org/abs/1904.07850
"""
return HourglassNetwork(
input_channel_dims=128,
channel_dims_per_stage=[256, 256, 384, 384, 384, 512],
num_hourglasses=2,
num_stages=5,
blocks_per_stage=[2, 2, 2, 2, 2, 4],
)
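# Sanity-check sketch: counting layers with `hourglass_depth` above explains
# the "104" in the name. Per hourglass the deepest path has 48 layers (16 in
# the innermost encoder-decoder plus 8 for each of the 4 outer stages), and
# 3 input conv layers + 2 * 48 + 2 output convs + 3 intermediate layers = 104.
#
#   net = hourglass_104()
#   assert hourglass_depth(net) == 104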
def single_stage_hourglass(input_channel_dims, channel_dims_per_stage,
blocks_per_stage, initial_downsample=True,
stagewise_downsample=True,
encoder_decoder_shortcut=True):
assert len(channel_dims_per_stage) == len(blocks_per_stage)
return HourglassNetwork(
input_channel_dims=input_channel_dims,
channel_dims_per_stage=channel_dims_per_stage,
num_hourglasses=1,
num_stages=len(channel_dims_per_stage) - 1,
blocks_per_stage=blocks_per_stage,
initial_downsample=initial_downsample,
stagewise_downsample=stagewise_downsample,
encoder_decoder_shortcut=encoder_decoder_shortcut
)
def hourglass_10(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[1, 1],
channel_dims_per_stage=[nc * 2, nc * 2])
def hourglass_20(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[1, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3])
def hourglass_32(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[2, 2, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3])
def hourglass_52(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[2, 2, 2, 2, 2, 4],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4])
def hourglass_100(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[4, 4, 4, 4, 4, 8],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4])
def hourglass_20_uniform_size(num_channels):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
blocks_per_stage=[1, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3],
initial_downsample=False,
stagewise_downsample=False)
def hourglass_20_no_shortcut(num_channels):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
blocks_per_stage=[1, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3],
initial_downsample=False,
encoder_decoder_shortcut=False)
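# Usage sketch (the shapes shown are what the code above implies for a 128x128
# input; any size divisible by the output stride should behave the same way):
#
#   import tensorflow as tf
#   net = hourglass_10(num_channels=32)
#   outputs = net(tf.random.uniform([1, 128, 128, 3]))
#   assert len(outputs) == net.num_feature_outputs  # 1 hourglass, 1 output
#   assert outputs[0].shape == (1, 32, 32, 64)  # input / out_stride, nc * 2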
| 21,032 | 32.6528 | 81 | py |
models | models-master/research/object_detection/models/keras_models/model_utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for Keras models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
# This is to specify the custom config of model structures. For example,
# ConvDefs(conv_name='conv_pw_12', filters=512) for Mobilenet V1 is to specify
# the filters of the conv layer with name 'conv_pw_12' as 512.
ConvDefs = collections.namedtuple('ConvDefs', ['conv_name', 'filters'])
def get_conv_def(conv_defs, layer_name):
"""Get the custom config for some layer of the model structure.
Args:
    conv_defs: A list of `ConvDefs` named tuples specifying the custom config
      of the model network. See `ConvDefs` for details.
layer_name: A string, the name of the layer to be customized.
Returns:
The number of filters for the layer, or `None` if there is no custom
config for the requested layer.
"""
for conv_def in conv_defs:
if layer_name == conv_def.conv_name:
return conv_def.filters
return None
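# Minimal sketch of how `ConvDefs` and `get_conv_def` fit together; the layer
# name follows the MobileNetV1 convention mentioned in the comment above.
#
#   conv_defs = [ConvDefs(conv_name='conv_pw_12', filters=512)]
#   get_conv_def(conv_defs, 'conv_pw_12')  # -> 512
#   get_conv_def(conv_defs, 'conv_pw_13')  # -> None (no custom config)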
def input_layer(shape, placeholder_with_default):
if tf.executing_eagerly():
return tf.keras.layers.Input(shape=shape)
else:
return tf.keras.layers.Input(tensor=placeholder_with_default)
| 1,913 | 34.444444 | 80 | py |
models | models-master/research/object_detection/models/keras_models/inception_resnet_v2_tf2_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for inception_resnet_v2.py.
This test mainly focuses on comparing slim inception resnet v2 and Keras
inception resnet v2 for object detection. To verify the consistency of the two
models, we compare:
1. Output shape of each layer given different inputs
2. Number of global variables
We also visualize the model structure via Tensorboard, and compare the model
layout and the parameters of each Op to make sure the two implementations are
consistent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.models.keras_models import inception_resnet_v2
from object_detection.utils import test_case
from object_detection.utils import tf_version
_KERAS_TO_SLIM_ENDPOINT_NAMES = {
'activation': 'Conv2d_1a_3x3',
'activation_1': 'Conv2d_2a_3x3',
'activation_2': 'Conv2d_2b_3x3',
'activation_3': 'Conv2d_3b_1x1',
'activation_4': 'Conv2d_4a_3x3',
'max_pooling2d': 'MaxPool_3a_3x3',
'max_pooling2d_1': 'MaxPool_5a_3x3',
'mixed_5b': 'Mixed_5b',
'mixed_6a': 'Mixed_6a',
'block17_20_ac': 'PreAuxLogits',
'mixed_7a': 'Mixed_7a',
'conv_7b_ac': 'Conv2d_7b_1x1',
}
_SLIM_ENDPOINT_SHAPES_128 = {
'Conv2d_1a_3x3': (2, 64, 64, 32),
'Conv2d_2a_3x3': (2, 64, 64, 32),
'Conv2d_2b_3x3': (2, 64, 64, 64),
'Conv2d_3b_1x1': (2, 32, 32, 80),
'Conv2d_4a_3x3': (2, 32, 32, 192),
'Conv2d_7b_1x1': (2, 4, 4, 1536),
'MaxPool_3a_3x3': (2, 32, 32, 64),
'MaxPool_5a_3x3': (2, 16, 16, 192),
'Mixed_5b': (2, 16, 16, 320),
'Mixed_6a': (2, 8, 8, 1088),
'Mixed_7a': (2, 4, 4, 2080),
'PreAuxLogits': (2, 8, 8, 1088)}
_SLIM_ENDPOINT_SHAPES_128_STRIDE_8 = {
'Conv2d_1a_3x3': (2, 64, 64, 32),
'Conv2d_2a_3x3': (2, 64, 64, 32),
'Conv2d_2b_3x3': (2, 64, 64, 64),
'Conv2d_3b_1x1': (2, 32, 32, 80),
'Conv2d_4a_3x3': (2, 32, 32, 192),
'MaxPool_3a_3x3': (2, 32, 32, 64),
'MaxPool_5a_3x3': (2, 16, 16, 192),
'Mixed_5b': (2, 16, 16, 320),
'Mixed_6a': (2, 16, 16, 1088),
'PreAuxLogits': (2, 16, 16, 1088)}
_SLIM_ENDPOINT_SHAPES_128_ALIGN_FEATURE_MAPS_FALSE = {
'Conv2d_1a_3x3': (2, 63, 63, 32),
'Conv2d_2a_3x3': (2, 61, 61, 32),
'Conv2d_2b_3x3': (2, 61, 61, 64),
'Conv2d_3b_1x1': (2, 30, 30, 80),
'Conv2d_4a_3x3': (2, 28, 28, 192),
'Conv2d_7b_1x1': (2, 2, 2, 1536),
'MaxPool_3a_3x3': (2, 30, 30, 64),
'MaxPool_5a_3x3': (2, 13, 13, 192),
'Mixed_5b': (2, 13, 13, 320),
'Mixed_6a': (2, 6, 6, 1088),
'Mixed_7a': (2, 2, 2, 2080),
'PreAuxLogits': (2, 6, 6, 1088)}
_SLIM_ENDPOINT_SHAPES_299 = {}
_SLIM_ENDPOINT_SHAPES_299_STRIDE_8 = {}
_SLIM_ENDPOINT_SHAPES_299_ALIGN_FEATURE_MAPS_FALSE = {}
_KERAS_LAYERS_TO_CHECK = list(_KERAS_TO_SLIM_ENDPOINT_NAMES.keys())
_NUM_CHANNELS = 3
_BATCH_SIZE = 2
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class InceptionResnetV2Test(test_case.TestCase):
def _create_application_with_layer_outputs(
self, layer_names, batchnorm_training,
output_stride=16,
align_feature_maps=False,
batchnorm_scale=False,
weight_decay=0.00004,
default_batchnorm_momentum=0.9997,
default_batchnorm_epsilon=0.001,):
"""Constructs Keras inception_resnet_v2 that extracts layer outputs."""
# Have to clear the Keras backend to ensure isolation in layer naming
tf.keras.backend.clear_session()
if not layer_names:
layer_names = _KERAS_LAYERS_TO_CHECK
full_model = inception_resnet_v2.inception_resnet_v2(
batchnorm_training=batchnorm_training,
output_stride=output_stride,
align_feature_maps=align_feature_maps,
weights=None,
batchnorm_scale=batchnorm_scale,
weight_decay=weight_decay,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
include_top=False)
layer_outputs = [full_model.get_layer(name=layer).output
for layer in layer_names]
return tf.keras.Model(
inputs=full_model.inputs,
outputs=layer_outputs)
def _check_returns_correct_shape(
self, image_height, image_width,
expected_feature_map_shape, layer_names=None, batchnorm_training=True,
output_stride=16,
align_feature_maps=False,
batchnorm_scale=False,
weight_decay=0.00004,
default_batchnorm_momentum=0.9997,
default_batchnorm_epsilon=0.001,):
if not layer_names:
layer_names = _KERAS_LAYERS_TO_CHECK
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=batchnorm_training,
output_stride=output_stride,
align_feature_maps=align_feature_maps,
batchnorm_scale=batchnorm_scale,
weight_decay=weight_decay,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon)
image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width,
_NUM_CHANNELS).astype(np.float32)
feature_maps = model(image_tensor)
for feature_map, layer_name in zip(feature_maps, layer_names):
endpoint_name = _KERAS_TO_SLIM_ENDPOINT_NAMES[layer_name]
expected_shape = expected_feature_map_shape[endpoint_name]
self.assertAllEqual(feature_map.shape, expected_shape)
def _get_variables(self, layer_names=None):
tf.keras.backend.clear_session()
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False)
preprocessed_inputs = tf.random.uniform([4, 40, 40, _NUM_CHANNELS])
model(preprocessed_inputs)
return model.variables
def test_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
expected_feature_map_shape = (
_SLIM_ENDPOINT_SHAPES_128)
self._check_returns_correct_shape(
image_height, image_width, expected_feature_map_shape,
align_feature_maps=True)
def test_returns_correct_shapes_128_output_stride_8(self):
image_height = 128
image_width = 128
expected_feature_map_shape = (
_SLIM_ENDPOINT_SHAPES_128_STRIDE_8)
# Output stride of 8 not defined beyond 'block17_20_ac', which is
# PreAuxLogits in slim. So, we exclude those layers in our Keras vs Slim
# comparison.
excluded_layers = {'mixed_7a', 'conv_7b_ac'}
layer_names = [l for l in _KERAS_LAYERS_TO_CHECK
if l not in excluded_layers]
self._check_returns_correct_shape(
image_height, image_width, expected_feature_map_shape,
layer_names=layer_names, output_stride=8, align_feature_maps=True)
def test_returns_correct_shapes_128_align_feature_maps_false(
self):
image_height = 128
image_width = 128
expected_feature_map_shape = (
_SLIM_ENDPOINT_SHAPES_128_ALIGN_FEATURE_MAPS_FALSE)
self._check_returns_correct_shape(
image_height, image_width, expected_feature_map_shape,
align_feature_maps=False)
def test_hyperparam_override(self):
model = inception_resnet_v2.inception_resnet_v2(
batchnorm_training=True,
default_batchnorm_momentum=0.2,
default_batchnorm_epsilon=0.1,
weights=None,
include_top=False)
bn_layer = model.get_layer(name='freezable_batch_norm')
self.assertAllClose(bn_layer.momentum, 0.2)
self.assertAllClose(bn_layer.epsilon, 0.1)
def test_variable_count(self):
variables = self._get_variables()
# 896 is the number of variables from slim inception resnet v2 model.
self.assertEqual(len(variables), 896)
if __name__ == '__main__':
tf.test.main()
| 8,487 | 36.22807 | 80 | py |
models | models-master/research/object_detection/models/keras_models/convert_keras_models.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Write keras weights into a tensorflow checkpoint.
The imagenet weights in `keras.applications` are downloaded from github.
This script converts them into the tensorflow checkpoint format and stores them
on disk where they can be easily accessed during training.
"""
from __future__ import print_function
import os
from absl import app
import numpy as np
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('model', 'resnet_v2_101',
'The model to load. The following are supported: '
'"resnet_v1_50", "resnet_v1_101", "resnet_v2_50", '
'"resnet_v2_101"')
tf.flags.DEFINE_string('output_path', None,
'The directory to output weights in.')
tf.flags.DEFINE_boolean('verify_weights', True,
('Verify the weights are loaded correctly by making '
'sure the predictions are the same before and after '
'saving.'))
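# Example invocation (a sketch; run from the directory containing this script
# and adjust the output path to taste):
#
#   python convert_keras_models.py --model resnet_v1_50 \
#     --output_path /tmp/keras_checkpoints
#
# This writes a checkpoint under /tmp/keras_checkpoints/resnet_v1_50/ and, with
# --verify_weights (the default), reloads it to confirm predictions match.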
def init_model(name):
"""Creates a Keras Model with the specific ResNet version."""
if name == 'resnet_v1_50':
model = tf.keras.applications.ResNet50(weights='imagenet')
elif name == 'resnet_v1_101':
model = tf.keras.applications.ResNet101(weights='imagenet')
elif name == 'resnet_v2_50':
model = tf.keras.applications.ResNet50V2(weights='imagenet')
elif name == 'resnet_v2_101':
model = tf.keras.applications.ResNet101V2(weights='imagenet')
else:
raise ValueError('Model {} not supported'.format(FLAGS.model))
return model
def main(_):
model = init_model(FLAGS.model)
path = os.path.join(FLAGS.output_path, FLAGS.model)
tf.gfile.MakeDirs(path)
weights_path = os.path.join(path, 'weights')
ckpt = tf.train.Checkpoint(feature_extractor=model)
saved_path = ckpt.save(weights_path)
if FLAGS.verify_weights:
imgs = np.random.randn(1, 224, 224, 3).astype(np.float32)
keras_preds = model(imgs)
model = init_model(FLAGS.model)
ckpt.restore(saved_path)
loaded_weights_pred = model(imgs).numpy()
if not np.all(np.isclose(keras_preds, loaded_weights_pred)):
raise RuntimeError('The model was not saved correctly.')
if __name__ == '__main__':
tf.enable_v2_behavior()
app.run(main)
| 2,964 | 33.476744 | 80 | py |
models | models-master/research/object_detection/models/keras_models/nonlocal_block.py | """Layer for Non-Local operation.
This is a building block which mimics self-attention in a feature map.
For more information, please see https://arxiv.org/pdf/1711.07971.pdf
"""
import tensorflow as tf
from object_detection.utils import shape_utils
class NonLocalBlock(tf.keras.layers.Layer):
"""A Non-local block."""
def __init__(self, bottleneck_channels, pairwise_fn='dot', pool_size=None,
add_coord_conv=False):
"""Constructor.
Args:
bottleneck_channels: The number of channels used to do pairwise
comparisons at each feature location.
pairwise_fn: The pairwise comparison function. Currently supports
'dot' and 'embedded_softmax'.
pool_size: The downsample size (achieved with max pool) used prior to
doing pairwise comparisons. This does not affect the shape of the output
tensor, but reduces computation. For a pool_size of 2, computation is
dropped by a factor of 4. If None, no downsampling is performed.
add_coord_conv: Concatenates a 2-channel feature map with normalized
coordinates (in range [-1, 1]) to the input, prior to the
non-local block.
Raises:
RuntimeError: If self._pairwise_fn is not one of "dot" or
"embedded_softmax".
"""
super().__init__()
self._bottleneck_channels = bottleneck_channels
self._add_coord_conv = add_coord_conv
self._pool_size = pool_size
if pairwise_fn not in ('dot', 'embedded_softmax'):
raise RuntimeError('pairwise_fn must be one of "dot" or '
'"embedded_softmax"')
self._pairwise_fn = pairwise_fn
def build(self, input_shape):
channels = input_shape[-1]
self.queries_conv = tf.keras.layers.Conv2D(
filters=self._bottleneck_channels, kernel_size=1, use_bias=False,
strides=1, padding='same')
self.keys_conv = tf.keras.layers.Conv2D(
filters=self._bottleneck_channels, kernel_size=1, use_bias=False,
strides=1, padding='same')
self.values_conv = tf.keras.layers.Conv2D(
filters=self._bottleneck_channels, kernel_size=1, use_bias=False,
strides=1, padding='same')
self.expand_conv = tf.keras.layers.Conv2D(
filters=channels, kernel_size=1, use_bias=False, strides=1,
padding='same')
self.batchnorm = tf.keras.layers.BatchNormalization(
name='batchnorm', epsilon=1e-5, momentum=0.1, fused=True,
beta_initializer='zeros', gamma_initializer='zeros')
if self._pool_size:
self.maxpool_keys = tf.keras.layers.MaxPool2D(
pool_size=(self._pool_size, self._pool_size))
self.maxpool_values = tf.keras.layers.MaxPool2D(
pool_size=(self._pool_size, self._pool_size))
def call(self, inputs):
"""Applies a non-local block to an input feature map.
Args:
inputs: A [batch, height, width, channels] float32 input tensor.
Returns:
An output tensor of the same shape as the input.
"""
batch, height, width, _ = shape_utils.combined_static_and_dynamic_shape(
inputs)
x = inputs
if self._add_coord_conv:
coords_x, coords_y = tf.meshgrid(tf.linspace(-1., 1., height),
tf.linspace(-1., 1., width))
coords = tf.stack([coords_y, coords_x], axis=-1)
coords = tf.tile(coords[tf.newaxis, :, :, :],
multiples=[batch, 1, 1, 1])
x = tf.concat([x, coords], axis=-1)
# shape: [B, H, W, bottleneck_channels].
queries = self.queries_conv(x)
# shape: [B, H, W, bottleneck_channels].
keys = self.keys_conv(x)
# shape: [B, H, W, bottleneck_channels].
values = self.values_conv(x)
keys_height, keys_width = height, width
if self._pool_size:
keys_height = height // self._pool_size
keys_width = width // self._pool_size
# shape: [B, H', W', bottleneck_channels].
keys = self.maxpool_keys(keys)
values = self.maxpool_values(values)
# Produce pairwise scores.
queries = tf.reshape(
queries, [batch, height * width, self._bottleneck_channels])
keys = tf.reshape(
keys, [batch, keys_height * keys_width, self._bottleneck_channels])
# shape = [B, H*W, H'*W'].
scores = tf.linalg.matmul(queries, keys, transpose_b=True)
if self._pairwise_fn == 'dot':
normalization = tf.cast(height * width, dtype=tf.float32)
scores = (1./normalization) * scores
elif self._pairwise_fn == 'embedded_softmax':
scores = tf.nn.softmax(scores, axis=-1)
# Multiply scores with values.
# shape = [B, H'*W', bottleneck_channels].
values = tf.reshape(
values, [batch, keys_height * keys_width, self._bottleneck_channels])
    # shape = [B, H*W, bottleneck_channels].
    weighted_values = tf.linalg.matmul(scores, values)
    # shape = [B, H, W, bottleneck_channels].
    weighted_values = tf.reshape(
        weighted_values, [batch, height, width, self._bottleneck_channels])
# Construct residual.
expand = self.batchnorm(self.expand_conv(weighted_values))
output = expand + inputs
return output
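# Usage sketch: the block preserves the input shape, so it can be inserted
# between convolutional stages of a backbone. The shapes below are only
# illustrative.
#
#   import tensorflow as tf
#   from object_detection.models.keras_models import nonlocal_block
#   block = nonlocal_block.NonLocalBlock(
#       bottleneck_channels=32, pairwise_fn='embedded_softmax', pool_size=2)
#   features = tf.random.uniform([2, 16, 16, 64])
#   outputs = block(features)
#   assert outputs.shape == features.shape  # (2, 16, 16, 64)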
| 5,069 | 37.409091 | 80 | py |
models | models-master/research/object_detection/models/keras_models/mobilenet_v1_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v1.py.
This test mainly focuses on comparing slim MobilenetV1 and Keras MobilenetV1 for
object detection. To verify the consistency of the two models, we compare:
1. Output shape of each layer given different inputs
2. Number of global variables
We also visualize the model structure via Tensorboard, and compare the model
layout and the parameters of each Op to make sure the two implementations are
consistent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models.keras_models import mobilenet_v1
from object_detection.models.keras_models import model_utils
from object_detection.models.keras_models import test_utils
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
_KERAS_LAYERS_TO_CHECK = [
'conv1_relu',
'conv_dw_1_relu', 'conv_pw_1_relu',
'conv_dw_2_relu', 'conv_pw_2_relu',
'conv_dw_3_relu', 'conv_pw_3_relu',
'conv_dw_4_relu', 'conv_pw_4_relu',
'conv_dw_5_relu', 'conv_pw_5_relu',
'conv_dw_6_relu', 'conv_pw_6_relu',
'conv_dw_7_relu', 'conv_pw_7_relu',
'conv_dw_8_relu', 'conv_pw_8_relu',
'conv_dw_9_relu', 'conv_pw_9_relu',
'conv_dw_10_relu', 'conv_pw_10_relu',
'conv_dw_11_relu', 'conv_pw_11_relu',
'conv_dw_12_relu', 'conv_pw_12_relu',
'conv_dw_13_relu', 'conv_pw_13_relu',
]
_NUM_CHANNELS = 3
_BATCH_SIZE = 2
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MobilenetV1Test(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
train: true,
scale: false,
center: true,
decay: 0.2,
epsilon: 0.1,
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _create_application_with_layer_outputs(
self, layer_names, batchnorm_training,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None,
conv_defs=None):
"""Constructs Keras MobilenetV1 that extracts intermediate layer outputs."""
if not layer_names:
layer_names = _KERAS_LAYERS_TO_CHECK
full_model = mobilenet_v1.mobilenet_v1(
batchnorm_training=batchnorm_training,
conv_hyperparams=conv_hyperparams,
weights=None,
use_explicit_padding=use_explicit_padding,
alpha=alpha,
min_depth=min_depth,
conv_defs=conv_defs,
include_top=False)
layer_outputs = [full_model.get_layer(name=layer).output
for layer in layer_names]
return tf.keras.Model(
inputs=full_model.inputs,
outputs=layer_outputs)
def _check_returns_correct_shape(
self, image_height, image_width, depth_multiplier,
expected_feature_map_shape, use_explicit_padding=False, min_depth=8,
layer_names=None, conv_defs=None):
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False,
use_explicit_padding=use_explicit_padding,
min_depth=min_depth,
alpha=depth_multiplier,
conv_defs=conv_defs)
image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width,
_NUM_CHANNELS).astype(np.float32)
feature_maps = model(image_tensor)
for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shape):
self.assertAllEqual(feature_map.shape, expected_shape)
def _check_returns_correct_shapes_with_dynamic_inputs(
self, image_height, image_width, depth_multiplier,
expected_feature_map_shape, use_explicit_padding=False, min_depth=8,
layer_names=None):
image_tensor = tf.random_uniform([_BATCH_SIZE, image_height, image_width,
_NUM_CHANNELS], dtype=tf.float32)
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False,
use_explicit_padding=use_explicit_padding,
alpha=depth_multiplier)
feature_maps = model(image_tensor)
for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shape):
self.assertAllEqual(feature_map.shape, expected_shape)
def _get_variables(self, depth_multiplier, layer_names=None):
tf.keras.backend.clear_session()
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=False,
alpha=depth_multiplier)
preprocessed_inputs = tf.random.uniform([2, 40, 40, 3])
model(preprocessed_inputs)
return model.variables
def test_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.moblenet_v1_expected_feature_map_shape_128)
self._check_returns_correct_shape(
image_height, image_width, depth_multiplier, expected_feature_map_shape)
def test_returns_correct_shapes_128_explicit_padding(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.moblenet_v1_expected_feature_map_shape_128_explicit_padding)
self._check_returns_correct_shape(
image_height, image_width, depth_multiplier, expected_feature_map_shape,
use_explicit_padding=True)
def test_returns_correct_shapes_with_dynamic_inputs(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.mobilenet_v1_expected_feature_map_shape_with_dynamic_inputs)
self._check_returns_correct_shapes_with_dynamic_inputs(
image_height, image_width, depth_multiplier, expected_feature_map_shape)
def test_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.moblenet_v1_expected_feature_map_shape_299)
self._check_returns_correct_shape(
image_height, image_width, depth_multiplier, expected_feature_map_shape)
def test_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
expected_feature_map_shape = (
test_utils.moblenet_v1_expected_feature_map_shape_enforcing_min_depth)
self._check_returns_correct_shape(
image_height, image_width, depth_multiplier, expected_feature_map_shape)
def test_returns_correct_shapes_with_conv_defs(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
conv_def_block_12 = model_utils.ConvDefs(
conv_name='conv_pw_12', filters=512)
conv_def_block_13 = model_utils.ConvDefs(
conv_name='conv_pw_13', filters=256)
conv_defs = [conv_def_block_12, conv_def_block_13]
expected_feature_map_shape = (
test_utils.moblenet_v1_expected_feature_map_shape_with_conv_defs)
self._check_returns_correct_shape(
image_height, image_width, depth_multiplier, expected_feature_map_shape,
conv_defs=conv_defs)
def test_hyperparam_override(self):
hyperparams = self._build_conv_hyperparams()
model = mobilenet_v1.mobilenet_v1(
batchnorm_training=True,
conv_hyperparams=hyperparams,
weights=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=32,
include_top=False)
hyperparams.params()
bn_layer = model.get_layer(name='conv_pw_5_bn')
self.assertAllClose(bn_layer.momentum, 0.2)
self.assertAllClose(bn_layer.epsilon, 0.1)
def test_variable_count(self):
depth_multiplier = 1
variables = self._get_variables(depth_multiplier)
# 135 is the number of variables from slim MobilenetV1 model.
self.assertEqual(len(variables), 135)
if __name__ == '__main__':
tf.test.main()
| 9,254 | 35.152344 | 80 | py |
models | models-master/research/object_detection/models/keras_models/inception_resnet_v2.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper around the Keras InceptionResnetV2 models for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.core import freezable_batch_norm
class _LayersOverride(object):
"""Alternative Keras layers interface for the Keras InceptionResNetV2."""
def __init__(self,
batchnorm_training,
output_stride=16,
align_feature_maps=False,
batchnorm_scale=False,
default_batchnorm_momentum=0.999,
default_batchnorm_epsilon=1e-3,
weight_decay=0.00004):
"""Alternative tf.keras.layers interface, for use by InceptionResNetV2.
It is used by the Keras applications kwargs injection API to
modify the Inception Resnet V2 Keras application with changes required by
the Object Detection API.
These injected interfaces make the following changes to the network:
- Supports freezing batch norm layers
- Adds support for feature map alignment (like in the Slim model)
- Adds support for changing the output stride (like in the Slim model)
- Adds support for overriding various batch norm hyperparameters
Because the Keras inception resnet v2 application does not assign explicit
names to most individual layers, the injection of output stride support
works by identifying convolution layers according to their filter counts
and pre-feature-map-alignment padding arguments.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
output_stride: A scalar that specifies the requested ratio of input to
output spatial resolution. Only supports 8 and 16.
align_feature_maps: When true, changes all the VALID paddings in the
network to SAME padding so that the feature maps are aligned.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
default_batchnorm_momentum: Float. Batch norm layers will be constructed
using this value as the momentum.
default_batchnorm_epsilon: small float added to variance to avoid
dividing by zero.
weight_decay: the l2 regularization weight decay for weights variables.
(gets multiplied by 0.5 to map from slim l2 regularization weight to
Keras l2 regularization weight).
"""
self._use_atrous = output_stride == 8
self._align_feature_maps = align_feature_maps
self._batchnorm_training = batchnorm_training
self._batchnorm_scale = batchnorm_scale
self._default_batchnorm_momentum = default_batchnorm_momentum
self._default_batchnorm_epsilon = default_batchnorm_epsilon
self.regularizer = tf.keras.regularizers.l2(weight_decay * 0.5)
def Conv2D(self, filters, kernel_size, **kwargs):
"""Builds a Conv2D layer according to the current Object Detection config.
Overrides the Keras InceptionResnetV2 application's convolutions with ones
that follow the spec specified by the Object Detection hyperparameters.
If feature map alignment is enabled, the padding will be forced to 'same'.
If output_stride is 8, some conv2d layers will be matched according to
their name or filter counts or pre-alignment padding parameters, and will
have the correct 'dilation rate' or 'strides' set.
Args:
filters: The number of filters to use for the convolution.
kernel_size: The kernel size to specify the height and width of the 2D
convolution window.
**kwargs: Keyword args specified by the Keras application for
constructing the convolution.
Returns:
A Keras Conv2D layer specified by the Object Detection hyperparameter
configurations.
"""
kwargs['kernel_regularizer'] = self.regularizer
kwargs['bias_regularizer'] = self.regularizer
# Because the Keras application does not set explicit names for most layers,
# (instead allowing names to auto-increment), we must match individual
# layers in the model according to their filter count, name, or
# pre-alignment mapping. This means we can only align the feature maps
# after we have applied our updates in cases where output_stride=8.
if self._use_atrous and (filters == 384):
kwargs['strides'] = 1
name = kwargs.get('name')
if self._use_atrous and (
(name and 'block17' in name) or
(filters == 128 or filters == 160 or
(filters == 192 and kwargs.get('padding', '').lower() != 'valid'))):
kwargs['dilation_rate'] = 2
if self._align_feature_maps:
kwargs['padding'] = 'same'
return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs)
def MaxPooling2D(self, pool_size, strides, **kwargs):
"""Builds a pooling layer according to the current Object Detection config.
Overrides the Keras InceptionResnetV2 application's MaxPooling2D layers with
ones that follow the spec specified by the Object Detection hyperparameters.
If feature map alignment is enabled, the padding will be forced to 'same'.
If output_stride is 8, some pooling layers will be matched according to
their pre-alignment padding parameters, and will have their 'strides'
argument overridden.
Args:
pool_size: The pool size specified by the Keras application.
strides: The strides specified by the unwrapped Keras application.
**kwargs: Keyword args specified by the Keras application for
constructing the max pooling layer.
Returns:
A MaxPool2D layer specified by the Object Detection hyperparameter
configurations.
"""
if self._use_atrous and kwargs.get('padding', '').lower() == 'valid':
strides = 1
if self._align_feature_maps:
kwargs['padding'] = 'same'
return tf.keras.layers.MaxPool2D(pool_size, strides=strides, **kwargs)
# We alias MaxPool2D because Keras has that alias
MaxPool2D = MaxPooling2D # pylint: disable=invalid-name
def BatchNormalization(self, **kwargs):
"""Builds a normalization layer.
Overrides the Keras application batch norm with the norm specified by the
Object Detection configuration.
Args:
**kwargs: Keyword arguments from the `layers.BatchNormalization` calls in
the Keras application.
Returns:
A normalization layer specified by the Object Detection hyperparameter
configurations.
"""
kwargs['scale'] = self._batchnorm_scale
return freezable_batch_norm.FreezableBatchNorm(
training=self._batchnorm_training,
epsilon=self._default_batchnorm_epsilon,
momentum=self._default_batchnorm_momentum,
**kwargs)
# Forward all non-overridden methods to the keras layers
def __getattr__(self, item):
return getattr(tf.keras.layers, item)
# pylint: disable=invalid-name
def inception_resnet_v2(
batchnorm_training,
output_stride=16,
align_feature_maps=False,
batchnorm_scale=False,
weight_decay=0.00004,
default_batchnorm_momentum=0.9997,
default_batchnorm_epsilon=0.001,
**kwargs):
"""Instantiates the InceptionResnetV2 architecture.
(Modified for object detection)
This wraps the InceptionResnetV2 tensorflow Keras application, but uses the
Keras application's kwargs-based monkey-patching API to override the Keras
architecture with the following changes:
- Supports freezing batch norm layers with FreezableBatchNorms
- Adds support for feature map alignment (like in the Slim model)
- Adds support for changing the output stride (like in the Slim model)
- Changes the default batchnorm momentum to 0.9997
- Adds support for overriding various batchnorm hyperparameters
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
output_stride: A scalar that specifies the requested ratio of input to
output spatial resolution. Only supports 8 and 16.
align_feature_maps: When true, changes all the VALID paddings in the
network to SAME padding so that the feature maps are aligned.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
weight_decay: the l2 regularization weight decay for weights variables.
(gets multiplied by 0.5 to map from slim l2 regularization weight to
Keras l2 regularization weight).
default_batchnorm_momentum: Float. Batch norm layers will be constructed
using this value as the momentum.
default_batchnorm_epsilon: small float added to variance to avoid
dividing by zero.
**kwargs: Keyword arguments forwarded directly to the
`tf.keras.applications.InceptionResNetV2` method that constructs the
Keras model.
Returns:
A Keras model instance.
"""
if output_stride != 8 and output_stride != 16:
raise ValueError('output_stride must be 8 or 16.')
layers_override = _LayersOverride(
batchnorm_training,
output_stride,
align_feature_maps=align_feature_maps,
batchnorm_scale=batchnorm_scale,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
weight_decay=weight_decay)
return tf.keras.applications.InceptionResNetV2(
layers=layers_override, **kwargs)
# pylint: enable=invalid-name
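# Usage sketch: the keyword arguments `weights` and `include_top` are standard
# `tf.keras.applications.InceptionResNetV2` arguments forwarded via **kwargs,
# mirroring how the unit tests construct the model.
#
#   import tensorflow.compat.v1 as tf
#   from object_detection.models.keras_models import inception_resnet_v2
#   model = inception_resnet_v2.inception_resnet_v2(
#       batchnorm_training=False, output_stride=16, align_feature_maps=True,
#       weights=None, include_top=False)
#   features = model(tf.random.uniform([1, 128, 128, 3]))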
| 10,309 | 41.081633 | 80 | py |
models | models-master/research/object_detection/models/keras_models/resnet_v1_tf2_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for resnet_v1.py.
This test mainly focuses on comparing slim resnet v1 and Keras resnet v1 for
object detection. To verify the consistency of the two models, we compare:
1. Output shape of each layer given different inputs.
2. Number of global variables.
"""
import unittest
from absl.testing import parameterized
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models.keras_models import resnet_v1
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
_EXPECTED_SHAPES_224_RESNET50 = {
'conv2_block3_out': (4, 56, 56, 256),
'conv3_block4_out': (4, 28, 28, 512),
'conv4_block6_out': (4, 14, 14, 1024),
'conv5_block3_out': (4, 7, 7, 2048),
}
_EXPECTED_SHAPES_224_RESNET101 = {
'conv2_block3_out': (4, 56, 56, 256),
'conv3_block4_out': (4, 28, 28, 512),
'conv4_block23_out': (4, 14, 14, 1024),
'conv5_block3_out': (4, 7, 7, 2048),
}
_EXPECTED_SHAPES_224_RESNET152 = {
'conv2_block3_out': (4, 56, 56, 256),
'conv3_block8_out': (4, 28, 28, 512),
'conv4_block36_out': (4, 14, 14, 1024),
'conv5_block3_out': (4, 7, 7, 2048),
}
_RESNET_NAMES = ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']
_RESNET_MODELS = [
resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101, resnet_v1.resnet_v1_152
]
_RESNET_SHAPES = [
_EXPECTED_SHAPES_224_RESNET50, _EXPECTED_SHAPES_224_RESNET101,
_EXPECTED_SHAPES_224_RESNET152
]
_NUM_CHANNELS = 3
_BATCH_SIZE = 4
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ResnetV1Test(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
batch_norm {
scale: true,
decay: 0.997,
epsilon: 0.001,
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _create_application_with_layer_outputs(self,
model_index,
batchnorm_training,
batchnorm_scale=True,
weight_decay=0.0001,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5):
"""Constructs Keras resnet_v1 that extracts layer outputs."""
# Have to clear the Keras backend to ensure isolation in layer naming
tf.keras.backend.clear_session()
layer_names = _RESNET_SHAPES[model_index].keys()
full_model = _RESNET_MODELS[model_index](
batchnorm_training=batchnorm_training,
weights=None,
batchnorm_scale=batchnorm_scale,
weight_decay=weight_decay,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
include_top=False)
layer_outputs = [
full_model.get_layer(name=layer).output for layer in layer_names
]
return tf.keras.Model(inputs=full_model.inputs, outputs=layer_outputs)
def _check_returns_correct_shape(self,
image_height,
image_width,
model_index,
expected_feature_map_shape,
batchnorm_training=True,
batchnorm_scale=True,
weight_decay=0.0001,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5):
model = self._create_application_with_layer_outputs(
model_index=model_index,
batchnorm_training=batchnorm_training,
batchnorm_scale=batchnorm_scale,
weight_decay=weight_decay,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon)
image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width,
_NUM_CHANNELS).astype(np.float32)
feature_maps = model(image_tensor)
layer_names = _RESNET_SHAPES[model_index].keys()
for feature_map, layer_name in zip(feature_maps, layer_names):
expected_shape = _RESNET_SHAPES[model_index][layer_name]
self.assertAllEqual(feature_map.shape, expected_shape)
def _get_variables(self, model_index):
tf.keras.backend.clear_session()
model = self._create_application_with_layer_outputs(
model_index, batchnorm_training=False)
preprocessed_inputs = tf.random.uniform([2, 40, 40, _NUM_CHANNELS])
model(preprocessed_inputs)
return model.variables
def test_returns_correct_shapes_224(self):
image_height = 224
image_width = 224
for model_index, _ in enumerate(_RESNET_NAMES):
expected_feature_map_shape = _RESNET_SHAPES[model_index]
self._check_returns_correct_shape(image_height, image_width, model_index,
expected_feature_map_shape)
def test_hyperparam_override(self):
for model_name in _RESNET_MODELS:
model = model_name(
batchnorm_training=True,
default_batchnorm_momentum=0.2,
default_batchnorm_epsilon=0.1,
weights=None,
include_top=False)
bn_layer = model.get_layer(name='conv1_bn')
self.assertAllClose(bn_layer.momentum, 0.2)
self.assertAllClose(bn_layer.epsilon, 0.1)
def test_variable_count(self):
# The number of variables from slim resnetv1-* model.
variable_nums = [265, 520, 775]
for model_index, var_num in enumerate(variable_nums):
variables = self._get_variables(model_index)
self.assertEqual(len(variables), var_num)
class ResnetShapeTest(test_case.TestCase, parameterized.TestCase):
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
@parameterized.parameters(
{
'resnet_type':
'resnet_v1_34',
'output_layer_names': [
'conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out',
'conv5_block3_out'
]
}, {
'resnet_type':
'resnet_v1_18',
'output_layer_names': [
'conv2_block2_out', 'conv3_block2_out', 'conv4_block2_out',
'conv5_block2_out'
]
})
def test_output_shapes(self, resnet_type, output_layer_names):
if resnet_type == 'resnet_v1_34':
model = resnet_v1.resnet_v1_34(input_shape=(64, 64, 3), weights=None)
else:
model = resnet_v1.resnet_v1_18(input_shape=(64, 64, 3), weights=None)
outputs = [
model.get_layer(output_layer_name).output
for output_layer_name in output_layer_names
]
resnet_model = tf.keras.models.Model(inputs=model.input, outputs=outputs)
outputs = resnet_model(np.zeros((2, 64, 64, 3), dtype=np.float32))
# Check the shape of 'conv2_block3_out':
self.assertEqual(outputs[0].shape, [2, 16, 16, 64])
# Check the shape of 'conv3_block4_out':
self.assertEqual(outputs[1].shape, [2, 8, 8, 128])
# Check the shape of 'conv4_block6_out':
self.assertEqual(outputs[2].shape, [2, 4, 4, 256])
# Check the shape of 'conv5_block3_out':
self.assertEqual(outputs[3].shape, [2, 2, 2, 512])
if __name__ == '__main__':
tf.test.main()
| 8,565 | 36.735683 | 80 | py |
models | models-master/research/object_detection/models/keras_models/mobilenet_v1.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper around the Keras MobilenetV1 models for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.core import freezable_batch_norm
from object_detection.models.keras_models import model_utils
def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
  kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
                           kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
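# Worked example (a sketch) of the padding arithmetic above for a 3x3 kernel
# with rate=1: the effective kernel size is 3, so pad_total is 2 per spatial
# dimension, split as pad_beg=1 and pad_end=1.
#
#   x = tf.zeros([1, 5, 5, 8])
#   y = _fixed_padding(x, kernel_size=(3, 3))  # shape: [1, 7, 7, 8]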
class _LayersOverride(object):
"""Alternative Keras layers interface for the Keras MobileNetV1."""
def __init__(self,
batchnorm_training,
default_batchnorm_momentum=0.999,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None,
conv_defs=None):
"""Alternative tf.keras.layers interface, for use by the Keras MobileNetV1.
It is used by the Keras applications kwargs injection API to
modify the MobilenetV1 Keras application with changes required by
the Object Detection API.
These injected interfaces make the following changes to the network:
- Applies the Object Detection hyperparameter configuration
- Supports FreezableBatchNorms
- Adds support for a min number of filters for each layer
- Makes the `alpha` parameter affect the final convolution block even if it
is less than 1.0
- Adds support for explicit padding of convolutions
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default mobilenet_v1 layer builders.
use_explicit_padding: If True, use 'valid' padding for convolutions,
but explicitly pre-pads inputs so that the output dimensions are the
same as if 'same' padding were used. Off by default.
alpha: The width multiplier referenced in the MobileNetV1 paper. It
        modifies the number of filters in each convolutional layer. It is
        called the depth multiplier in the Keras MobilenetV1 application.
min_depth: Minimum number of filters in the convolutional layers.
conv_defs: Network layout to specify the mobilenet_v1 body. Default is
`None` to use the default mobilenet_v1 network layout.
"""
self._alpha = alpha
self._batchnorm_training = batchnorm_training
self._default_batchnorm_momentum = default_batchnorm_momentum
self._conv_hyperparams = conv_hyperparams
self._use_explicit_padding = use_explicit_padding
self._min_depth = min_depth
self._conv_defs = conv_defs
self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5)
self.initializer = tf.truncated_normal_initializer(stddev=0.09)
def _FixedPaddingLayer(self, kernel_size, rate=1):
return tf.keras.layers.Lambda(
lambda x: _fixed_padding(x, kernel_size, rate))
def Conv2D(self, filters, kernel_size, **kwargs):
"""Builds a Conv2D layer according to the current Object Detection config.
Overrides the Keras MobileNetV1 application's convolutions with ones that
follow the spec specified by the Object Detection hyperparameters.
Args:
filters: The number of filters to use for the convolution.
kernel_size: The kernel size to specify the height and width of the 2D
convolution window. In this function, the kernel size is expected to
        be a pair of equal numbers (i.e. a square kernel).
**kwargs: Keyword args specified by the Keras application for
constructing the convolution.
Returns:
A one-arg callable that will either directly apply a Keras Conv2D layer to
the input argument, or that will first pad the input then apply a Conv2D
layer.
Raises:
ValueError: if kernel size is not a pair of equal
integers (representing a square kernel).
"""
if not isinstance(kernel_size, tuple):
raise ValueError('kernel is expected to be a tuple.')
if len(kernel_size) != 2:
raise ValueError('kernel is expected to be length two.')
if kernel_size[0] != kernel_size[1]:
raise ValueError('kernel is expected to be square.')
layer_name = kwargs['name']
if self._conv_defs:
conv_filters = model_utils.get_conv_def(self._conv_defs, layer_name)
if conv_filters:
filters = conv_filters
# Apply the width multiplier and the minimum depth to the convolution layers
filters = int(filters * self._alpha)
if self._min_depth and filters < self._min_depth:
filters = self._min_depth
if self._conv_hyperparams:
kwargs = self._conv_hyperparams.params(**kwargs)
else:
kwargs['kernel_regularizer'] = self.regularizer
kwargs['kernel_initializer'] = self.initializer
kwargs['padding'] = 'same'
if self._use_explicit_padding and kernel_size[0] > 1:
kwargs['padding'] = 'valid'
def padded_conv(features): # pylint: disable=invalid-name
padded_features = self._FixedPaddingLayer(kernel_size)(features)
return tf.keras.layers.Conv2D(
filters, kernel_size, **kwargs)(padded_features)
return padded_conv
else:
return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs)
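  # Illustrative note (not part of the original file): with
  # use_explicit_padding=True and kernel_size=(3, 3), a [1, 10, 10, 3] input is
  # first padded to [1, 12, 12, 3] by _FixedPaddingLayer and then convolved
  # with 'valid' padding, which yields the same [1, 10, 10, filters] output
  # size that 'same' padding would have produced.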
def DepthwiseConv2D(self, kernel_size, **kwargs):
"""Builds a DepthwiseConv2D according to the Object Detection config.
    Overrides the Keras MobileNetV1 application's convolutions with ones that
follow the spec specified by the Object Detection hyperparameters.
Args:
kernel_size: The kernel size to specify the height and width of the 2D
convolution window.
**kwargs: Keyword args specified by the Keras application for
constructing the convolution.
Returns:
A one-arg callable that will either directly apply a Keras DepthwiseConv2D
layer to the input argument, or that will first pad the input then apply
the depthwise convolution.
"""
if self._conv_hyperparams:
kwargs = self._conv_hyperparams.params(**kwargs)
      # Both the regularizer and the initializer also apply to the depthwise
      # layer in MobileNetV1, so we remap the kernel_* args to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
else:
kwargs['depthwise_regularizer'] = self.regularizer
kwargs['depthwise_initializer'] = self.initializer
kwargs['padding'] = 'same'
if self._use_explicit_padding:
kwargs['padding'] = 'valid'
def padded_depthwise_conv(features): # pylint: disable=invalid-name
padded_features = self._FixedPaddingLayer(kernel_size)(features)
return tf.keras.layers.DepthwiseConv2D(
kernel_size, **kwargs)(padded_features)
return padded_depthwise_conv
else:
return tf.keras.layers.DepthwiseConv2D(kernel_size, **kwargs)
def BatchNormalization(self, **kwargs):
"""Builds a normalization layer.
Overrides the Keras application batch norm with the norm specified by the
Object Detection configuration.
Args:
**kwargs: Only the name is used, all other params ignored.
Required for matching `layers.BatchNormalization` calls in the Keras
application.
Returns:
A normalization layer specified by the Object Detection hyperparameter
configurations.
"""
name = kwargs.get('name')
if self._conv_hyperparams:
return self._conv_hyperparams.build_batch_norm(
training=self._batchnorm_training,
name=name)
else:
return freezable_batch_norm.FreezableBatchNorm(
training=self._batchnorm_training,
epsilon=1e-3,
momentum=self._default_batchnorm_momentum,
name=name)
def Input(self, shape):
"""Builds an Input layer.
Overrides the Keras application Input layer with one that uses a
tf.placeholder_with_default instead of a tf.placeholder. This is necessary
to ensure the application works when run on a TPU.
Args:
shape: The shape for the input layer to use. (Does not include a dimension
for the batch size).
Returns:
An input layer for the specified shape that internally uses a
placeholder_with_default.
"""
default_size = 224
default_batch_size = 1
shape = list(shape)
default_shape = [default_size if dim is None else dim for dim in shape]
input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape)
placeholder_with_default = tf.placeholder_with_default(
input=input_tensor, shape=[None] + shape)
return model_utils.input_layer(shape, placeholder_with_default)
# pylint: disable=unused-argument
def ReLU(self, *args, **kwargs):
"""Builds an activation layer.
Overrides the Keras application ReLU with the activation specified by the
Object Detection configuration.
Args:
      *args: Ignored, required to match the `tf.keras.layers.ReLU` interface.
      **kwargs: Only the name is used, required to match the
        `tf.keras.layers.ReLU` interface.
Returns:
An activation layer specified by the Object Detection hyperparameter
configurations.
"""
name = kwargs.get('name')
if self._conv_hyperparams:
return self._conv_hyperparams.build_activation_layer(name=name)
else:
return tf.keras.layers.Lambda(tf.nn.relu6, name=name)
# pylint: enable=unused-argument
# pylint: disable=unused-argument
def ZeroPadding2D(self, padding, **kwargs):
"""Replaces explicit padding in the Keras application with a no-op.
Args:
padding: The padding values for image height and width.
**kwargs: Ignored, required to match the Keras applications usage.
Returns:
A no-op identity lambda.
"""
return lambda x: x
# pylint: enable=unused-argument
# Forward all non-overridden methods to the keras layers
def __getattr__(self, item):
return getattr(tf.keras.layers, item)
# pylint: disable=invalid-name
def mobilenet_v1(batchnorm_training,
default_batchnorm_momentum=0.9997,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None,
conv_defs=None,
**kwargs):
"""Instantiates the MobileNetV1 architecture, modified for object detection.
This wraps the MobileNetV1 tensorflow Keras application, but uses the
Keras application's kwargs-based monkey-patching API to override the Keras
architecture with the following changes:
- Changes the default batchnorm momentum to 0.9997
- Applies the Object Detection hyperparameter configuration
- Supports FreezableBatchNorms
- Adds support for a min number of filters for each layer
- Makes the `alpha` parameter affect the final convolution block even if it
is less than 1.0
- Adds support for explicit padding of convolutions
- Makes the Input layer use a tf.placeholder_with_default instead of a
tf.placeholder, to work on TPUs.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default mobilenet_v1 layer builders.
use_explicit_padding: If True, use 'valid' padding for convolutions,
but explicitly pre-pads inputs so that the output dimensions are the
same as if 'same' padding were used. Off by default.
alpha: The width multiplier referenced in the MobileNetV1 paper. It
modifies the number of filters in each convolutional layer.
min_depth: Minimum number of filters in the convolutional layers.
conv_defs: Network layout to specify the mobilenet_v1 body. Default is
`None` to use the default mobilenet_v1 network layout.
**kwargs: Keyword arguments forwarded directly to the
      `tf.keras.applications.MobileNet` function that constructs the Keras
model.
Returns:
A Keras model instance.
"""
layers_override = _LayersOverride(
batchnorm_training,
default_batchnorm_momentum=default_batchnorm_momentum,
conv_hyperparams=conv_hyperparams,
use_explicit_padding=use_explicit_padding,
min_depth=min_depth,
alpha=alpha,
conv_defs=conv_defs)
return tf.keras.applications.MobileNet(
alpha=alpha, layers=layers_override, **kwargs)
# pylint: enable=invalid-name
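# Illustrative usage sketch (not part of the original file). Constructing the
# modified backbone for feature extraction; the Keras application kwargs below
# (`input_shape`, `include_top`, `weights`) are assumptions for the example,
# and the call assumes a TF1-style graph context because of the
# `tf.placeholder_with_default` based Input layer above.
#
#   backbone = mobilenet_v1(
#       batchnorm_training=False,
#       alpha=1.0,
#       min_depth=8,
#       input_shape=(224, 224, 3),
#       include_top=False,
#       weights=None)
#   # For the standard 1.0/224 configuration the final feature map has shape
#   # (None, 7, 7, 1024).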
| 14,960 | 40.674095 | 80 | py |
models | models-master/research/object_detection/models/keras_models/nonlocal_block_tf2_test.py | """Tests for google3.third_party.tensorflow_models.object_detection.models.keras_models.nonlocal_block."""
import unittest
from absl.testing import parameterized
import tensorflow as tf
from object_detection.models.keras_models import nonlocal_block
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class NonlocalTest(test_case.TestCase, parameterized.TestCase):
@parameterized.parameters([{'pool_size': None,
'add_coord_conv': False},
{'pool_size': None,
'add_coord_conv': True},
{'pool_size': 2,
'add_coord_conv': False},
{'pool_size': 2,
'add_coord_conv': True}])
def test_run_nonlocal_block(self, pool_size, add_coord_conv):
nonlocal_op = nonlocal_block.NonLocalBlock(
8, pool_size=pool_size, add_coord_conv=add_coord_conv)
def graph_fn():
inputs = tf.zeros((4, 16, 16, 32), dtype=tf.float32)
outputs = nonlocal_op(inputs)
return outputs
outputs = self.execute(graph_fn, [])
self.assertAllEqual([4, 16, 16, 32], outputs.shape)
if __name__ == '__main__':
tf.test.main()
| 1,343 | 37.4 | 106 | py |
models | models-master/research/object_detection/models/keras_models/base_models/original_mobilenet_v2.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet v2 models for Keras.
MobileNetV2 is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNetV2 is very similar to the original MobileNet,
except that it uses inverted residual blocks with
bottlenecking features. It has a drastically lower
parameter count than the original MobileNet.
MobileNets support any input size greater
than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 22 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
0.35, 0.5, 0.75, 1.0 (also called 100% MobileNet), 1.3, and 1.4.
For each of these `alpha` values, weights for 5 different input image sizes
are provided (224, 192, 160, 128, and 96).
The following table describes the performance of
MobileNet on various input sizes:
------------------------------------------------------------------------
MACs stands for Multiply Adds
| Classification Checkpoint | MACs (M) | Parameters (M) | Top 1 Acc | Top 5 Acc |
|---------------------------|----------|----------------|-----------|-----------|
| [mobilenet_v2_1.4_224] | 582 | 6.06 | 75.0 | 92.5 |
| [mobilenet_v2_1.3_224] | 509 | 5.34 | 74.4 | 92.1 |
| [mobilenet_v2_1.0_224] | 300 | 3.47 | 71.8 | 91.0 |
| [mobilenet_v2_1.0_192] | 221 | 3.47 | 70.7 | 90.1 |
| [mobilenet_v2_1.0_160] | 154 | 3.47 | 68.8 | 89.0 |
| [mobilenet_v2_1.0_128] | 99 | 3.47 | 65.3 | 86.9 |
| [mobilenet_v2_1.0_96] | 56 | 3.47 | 60.3 | 83.2 |
| [mobilenet_v2_0.75_224] | 209 | 2.61 | 69.8 | 89.6 |
| [mobilenet_v2_0.75_192] | 153 | 2.61 | 68.7 | 88.9 |
| [mobilenet_v2_0.75_160] | 107 | 2.61 | 66.4 | 87.3 |
| [mobilenet_v2_0.75_128] | 69 | 2.61 | 63.2 | 85.3 |
| [mobilenet_v2_0.75_96] | 39 | 2.61 | 58.8 | 81.6 |
| [mobilenet_v2_0.5_224] | 97 | 1.95 | 65.4 | 86.4 |
| [mobilenet_v2_0.5_192] | 71 | 1.95 | 63.9 | 85.4 |
| [mobilenet_v2_0.5_160] | 50 | 1.95 | 61.0 | 83.2 |
| [mobilenet_v2_0.5_128] | 32 | 1.95 | 57.7 | 80.8 |
| [mobilenet_v2_0.5_96] | 18 | 1.95 | 51.2 | 75.8 |
| [mobilenet_v2_0.35_224] | 59 | 1.66 | 60.3 | 82.9 |
| [mobilenet_v2_0.35_192] | 43 | 1.66 | 58.2 | 81.2 |
| [mobilenet_v2_0.35_160] | 30 | 1.66 | 55.7 | 79.1 |
| [mobilenet_v2_0.35_128] | 20 | 1.66 | 50.8 | 75.0 |
| [mobilenet_v2_0.35_96] | 11 | 1.66 | 45.5 | 70.4 |
The weights for all 16 models are obtained and translated from the TensorFlow
checkpoints found at
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/README.md
# Reference
This file contains building code for MobileNetV2, based on
[MobileNetV2: Inverted Residuals and Linear Bottlenecks]
(https://arxiv.org/abs/1801.04381)
Tests comparing this model to the existing Tensorflow model can be
found at
[mobilenet_v2_keras](https://github.com/JonathanCMitchell/mobilenet_v2_keras)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import numpy as np
import tensorflow.compat.v1 as tf
Model = tf.keras.Model
Input = tf.keras.layers.Input
Activation = tf.keras.layers.Activation
BatchNormalization = tf.keras.layers.BatchNormalization
Conv2D = tf.keras.layers.Conv2D
DepthwiseConv2D = tf.keras.layers.DepthwiseConv2D
GlobalAveragePooling2D = tf.keras.layers.GlobalAveragePooling2D
Add = tf.keras.layers.Add
Dense = tf.keras.layers.Dense
K = tf.keras.backend
def relu6(x):
return K.relu(x, max_value=6)
def _obtain_input_shape(
input_shape,
default_size,
min_size,
data_format,
require_flatten):
"""Internal utility to compute/validate an ImageNet model's input shape.
Args:
input_shape: either None (will return the default network input shape),
or a user-provided shape to be validated.
default_size: default input width/height for the model.
min_size: minimum input width/height accepted by the model.
data_format: image data format to use.
require_flatten: whether the model is expected to
be linked to a classifier via a Flatten layer.
Returns:
An integer shape tuple (may include None entries).
Raises:
ValueError: in case of invalid argument values.
"""
if input_shape and len(input_shape) == 3:
if data_format == 'channels_first':
if input_shape[0] not in {1, 3}:
warnings.warn(
'This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[0]) + ' input channels.')
default_shape = (input_shape[0], default_size, default_size)
else:
if input_shape[-1] not in {1, 3}:
warnings.warn(
'This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[-1]) + ' input channels.')
default_shape = (default_size, default_size, input_shape[-1])
else:
if data_format == 'channels_first':
default_shape = (3, default_size, default_size)
else:
default_shape = (default_size, default_size, 3)
if input_shape:
if data_format == 'channels_first':
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError(
'`input_shape` must be a tuple of three integers.')
if ((input_shape[1] is not None and input_shape[1] < min_size) or
(input_shape[2] is not None and input_shape[2] < min_size)):
raise ValueError('Input size must be at least ' +
str(min_size) + 'x' + str(min_size) +
'; got `input_shape=' +
str(input_shape) + '`')
else:
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError(
'`input_shape` must be a tuple of three integers.')
if ((input_shape[0] is not None and input_shape[0] < min_size) or
(input_shape[1] is not None and input_shape[1] < min_size)):
raise ValueError('Input size must be at least ' +
str(min_size) + 'x' + str(min_size) +
'; got `input_shape=' +
str(input_shape) + '`')
else:
if require_flatten:
input_shape = default_shape
else:
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if require_flatten:
if None in input_shape:
raise ValueError('If `include_top` is True, '
'you should specify a static `input_shape`. '
'Got `input_shape=' + str(input_shape) + '`')
return input_shape
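# Worked examples for _obtain_input_shape (illustrative, not part of the
# original file):
#   _obtain_input_shape(None, 224, 32, 'channels_last', require_flatten=False)
#     -> (None, None, 3)
#   _obtain_input_shape((160, 160, 3), 224, 32, 'channels_last',
#                       require_flatten=True)
#     -> (160, 160, 3)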
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
This function applies the "Inception" preprocessing which converts
the RGB values from [0, 255] to [-1, 1]. Note that this preprocessing
function is different from `imagenet_utils.preprocess_input()`.
Args:
x: a 4D numpy array consists of RGB values within [0, 255].
Returns:
Preprocessed array.
"""
x /= 128.
x -= 1.
return x.astype(np.float32)
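# Worked example for preprocess_input (illustrative, not part of the original
# file):
#   preprocess_input(np.array([[[[0., 128., 255.]]]]))
#   -> array([[[[-1., 0., 0.9921875]]]], dtype=float32)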
# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
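# Worked examples for _make_divisible (illustrative, not part of the original
# file):
#   _make_divisible(32 * 0.35, 8) -> 16  (11.2 rounds to 8, which is more than
#                                         10% below 11.2, so it is bumped up)
#   _make_divisible(32 * 1.0, 8)  -> 32  (already divisible by 8)
#   _make_divisible(10, 8)        -> 16  (nearest multiple 8 is rejected by the
#                                         10% rule)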
def mobilenet_v2(input_shape=None,
alpha=1.0,
include_top=True,
classes=1000):
"""Instantiates the MobileNetV2 architecture.
To load a MobileNetV2 model via `load_model`, import the custom
objects `relu6` and pass them to the `custom_objects` parameter.
E.g.
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6})
Args:
    input_shape: optional shape tuple, to be specified if you would
      like to use a model with an input image resolution that is not
      (224, 224, 3). It should have exactly 3 input channels; e.g.
      `(160, 160, 3)` would be one valid value. If `None`, a default
      shape is inferred (see `_obtain_input_shape`).
alpha: controls the width of the network. This is known as the
width multiplier in the MobileNetV2 paper.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
include_top: whether to include the fully-connected
layer at the top of the network.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
    ValueError: in case of an invalid `input_shape` argument.
"""
# Determine proper input shape and default size.
# If input_shape is None and no input_tensor
if input_shape is None:
default_size = 224
# If input_shape is not None, assume default size
else:
if K.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = _obtain_input_shape(input_shape,
default_size=default_size,
min_size=32,
data_format=K.image_data_format(),
require_flatten=include_top)
if K.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if K.image_data_format() != 'channels_last':
warnings.warn('The MobileNet family of models is only available '
'for the input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
'in your Keras config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
img_input = Input(shape=input_shape)
first_block_filters = _make_divisible(32 * alpha, 8)
x = Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2), padding='same',
use_bias=False, name='Conv1')(img_input)
x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='bn_Conv1')(x)
x = Activation(relu6, name='Conv1_relu')(x)
x = _first_inverted_res_block(x,
filters=16,
alpha=alpha,
stride=1,
block_id=0)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
expansion=6, block_id=1)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
expansion=6, block_id=2)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
expansion=6, block_id=3)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=4)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=5)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2,
expansion=6, block_id=6)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=7)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=8)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=9)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=10)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=11)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=12)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2,
expansion=6, block_id=13)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
expansion=6, block_id=14)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
expansion=6, block_id=15)
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1,
expansion=6, block_id=16)
# no alpha applied to last conv as stated in the paper:
# if the width multiplier is greater than 1 we
# increase the number of output channels
if alpha > 1.0:
last_block_filters = _make_divisible(1280 * alpha, 8)
else:
last_block_filters = 1280
x = Conv2D(last_block_filters,
kernel_size=1,
use_bias=False,
name='Conv_1')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='Conv_1_bn')(x)
x = Activation(relu6, name='out_relu')(x)
if include_top:
x = GlobalAveragePooling2D()(x)
x = Dense(classes, activation='softmax',
use_bias=True, name='Logits')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
inputs = img_input
# Create model.
model = Model(inputs, x, name='mobilenetv2_%0.2f_%s' % (alpha, rows))
if old_data_format:
K.set_image_data_format(old_data_format)
return model
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
"""Build an inverted res block."""
in_channels = int(inputs.shape[-1])
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
# Expand
x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
use_bias=False, activation=None,
name='mobl%d_conv_expand' % block_id)(inputs)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name='bn%d_conv_bn_expand' %
block_id)(x)
x = Activation(relu6, name='conv_%d_relu' % block_id)(x)
# Depthwise
x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
use_bias=False, padding='same',
name='mobl%d_conv_depthwise' % block_id)(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name='bn%d_conv_depthwise' % block_id)(x)
x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
# Project
x = Conv2D(pointwise_filters,
kernel_size=1, padding='same', use_bias=False, activation=None,
name='mobl%d_conv_project' % block_id)(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name='bn%d_conv_bn_project' % block_id)(x)
if in_channels == pointwise_filters and stride == 1:
return Add(name='res_connect_' + str(block_id))([inputs, x])
return x
def _first_inverted_res_block(inputs,
stride,
alpha, filters, block_id):
"""Build the first inverted res block."""
in_channels = int(inputs.shape[-1])
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
# Depthwise
x = DepthwiseConv2D(kernel_size=3,
strides=stride, activation=None,
use_bias=False, padding='same',
name='mobl%d_conv_depthwise' %
block_id)(inputs)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name='bn%d_conv_depthwise' %
block_id)(x)
x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
# Project
x = Conv2D(pointwise_filters,
kernel_size=1,
padding='same',
use_bias=False,
activation=None,
name='mobl%d_conv_project' %
block_id)(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name='bn%d_conv_project' %
block_id)(x)
if in_channels == pointwise_filters and stride == 1:
return Add(name='res_connect_' + str(block_id))([inputs, x])
return x
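# Illustrative usage sketch (not part of the original file): building the
# classification variant of this re-implementation with the standard 1.0/224
# configuration. The constants below are arbitrary assumptions for the example.
if __name__ == '__main__':
  example_model = mobilenet_v2(
      input_shape=(224, 224, 3), alpha=1.0, include_top=True, classes=1000)
  example_model.summary()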
| 18,826 | 38.304802 | 92 | py |
models | models-master/research/object_detection/predictors/rfcn_keras_box_predictor_tf2_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.rfcn_box_predictor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors import rfcn_keras_box_predictor as box_predictor
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class RfcnKerasBoxPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_get_correct_box_encoding_and_class_prediction_shapes(self):
rfcn_box_predictor = box_predictor.RfcnKerasBoxPredictor(
is_training=False,
num_classes=2,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
num_spatial_bins=[3, 3],
depth=4,
crop_size=[12, 12],
box_code_size=4)
def graph_fn(image_features, proposal_boxes):
box_predictions = rfcn_box_predictor(
[image_features],
proposal_boxes=proposal_boxes)
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
proposal_boxes = np.random.rand(4, 2, 4).astype(np.float32)
(box_encodings, class_predictions_with_background) = self.execute(
graph_fn, [image_features, proposal_boxes])
self.assertAllEqual(box_encodings.shape, [8, 1, 2, 4])
self.assertAllEqual(class_predictions_with_background.shape, [8, 1, 3])
if __name__ == '__main__':
tf.test.main()
| 2,972 | 36.1625 | 81 | py |
models | models-master/research/object_detection/predictors/rfcn_keras_box_predictor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RFCN Box Predictor."""
import tensorflow.compat.v1 as tf
from object_detection.core import box_predictor
from object_detection.utils import ops
BOX_ENCODINGS = box_predictor.BOX_ENCODINGS
CLASS_PREDICTIONS_WITH_BACKGROUND = (
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND)
MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS
class RfcnKerasBoxPredictor(box_predictor.KerasBoxPredictor):
"""RFCN Box Predictor.
Applies a position sensitive ROI pooling on position sensitive feature maps to
predict classes and refined locations. See https://arxiv.org/abs/1605.06409
for details.
This is used for the second stage of the RFCN meta architecture. Notice that
locations are *not* shared across classes, thus for each anchor, a separate
prediction is made for each class.
"""
def __init__(self,
is_training,
num_classes,
conv_hyperparams,
freeze_batchnorm,
num_spatial_bins,
depth,
crop_size,
box_code_size,
name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
num_spatial_bins: A list of two integers `[spatial_bins_y,
spatial_bins_x]`.
depth: Target depth to reduce the input feature maps to.
crop_size: A list of two integers `[crop_height, crop_width]`.
box_code_size: Size of encoding for each box.
name: A string name scope to assign to the box predictor. If `None`, Keras
will auto-generate one from the class name.
"""
super(RfcnKerasBoxPredictor, self).__init__(
is_training, num_classes, freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=False, name=name)
self._freeze_batchnorm = freeze_batchnorm
self._conv_hyperparams = conv_hyperparams
self._num_spatial_bins = num_spatial_bins
self._depth = depth
self._crop_size = crop_size
self._box_code_size = box_code_size
# Build the shared layers used for both heads
self._shared_conv_layers = []
self._shared_conv_layers.append(
tf.keras.layers.Conv2D(
self._depth,
[1, 1],
padding='SAME',
name='reduce_depth_conv',
**self._conv_hyperparams.params()))
self._shared_conv_layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name='reduce_depth_batchnorm'))
self._shared_conv_layers.append(
self._conv_hyperparams.build_activation_layer(
name='reduce_depth_activation'))
self._box_encoder_layers = []
location_feature_map_depth = (self._num_spatial_bins[0] *
self._num_spatial_bins[1] *
self.num_classes *
self._box_code_size)
self._box_encoder_layers.append(
tf.keras.layers.Conv2D(
location_feature_map_depth,
[1, 1],
padding='SAME',
name='refined_locations_conv',
**self._conv_hyperparams.params()))
self._box_encoder_layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name='refined_locations_batchnorm'))
self._class_predictor_layers = []
self._total_classes = self.num_classes + 1 # Account for background class.
class_feature_map_depth = (self._num_spatial_bins[0] *
self._num_spatial_bins[1] *
self._total_classes)
self._class_predictor_layers.append(
tf.keras.layers.Conv2D(
class_feature_map_depth,
[1, 1],
padding='SAME',
name='class_predictions_conv',
**self._conv_hyperparams.params()))
self._class_predictor_layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name='class_predictions_batchnorm'))
@property
def num_classes(self):
return self._num_classes
def _predict(self, image_features, proposal_boxes, **kwargs):
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
proposal_boxes: A float tensor of shape [batch_size, num_proposals,
box_code_size].
**kwargs: Unused Keyword args
Returns:
box_encodings: A list of float tensors of shape
[batch_size, num_anchors_i, q, code_size] representing the location of
the objects, where q is 1 or the number of classes. Each entry in the
list corresponds to a feature map in the input `image_features` list.
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
Raises:
ValueError: if num_predictions_per_location is not 1 or if
len(image_features) is not 1.
"""
if len(image_features) != 1:
raise ValueError('length of `image_features` must be 1. Found {}'.
format(len(image_features)))
image_feature = image_features[0]
batch_size = tf.shape(proposal_boxes)[0]
num_boxes = tf.shape(proposal_boxes)[1]
net = image_feature
for layer in self._shared_conv_layers:
net = layer(net)
# Location predictions.
box_net = net
for layer in self._box_encoder_layers:
box_net = layer(box_net)
box_encodings = ops.batch_position_sensitive_crop_regions(
box_net,
boxes=proposal_boxes,
crop_size=self._crop_size,
num_spatial_bins=self._num_spatial_bins,
global_pool=True)
box_encodings = tf.squeeze(box_encodings, axis=[2, 3])
box_encodings = tf.reshape(box_encodings,
[batch_size * num_boxes, 1, self.num_classes,
self._box_code_size])
# Class predictions.
class_net = net
for layer in self._class_predictor_layers:
class_net = layer(class_net)
class_predictions_with_background = (
ops.batch_position_sensitive_crop_regions(
class_net,
boxes=proposal_boxes,
crop_size=self._crop_size,
num_spatial_bins=self._num_spatial_bins,
global_pool=True))
class_predictions_with_background = tf.squeeze(
class_predictions_with_background, axis=[2, 3])
class_predictions_with_background = tf.reshape(
class_predictions_with_background,
[batch_size * num_boxes, 1, self._total_classes])
return {BOX_ENCODINGS: [box_encodings],
CLASS_PREDICTIONS_WITH_BACKGROUND:
[class_predictions_with_background]}
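# Illustrative usage sketch (not part of the original file); it mirrors the
# accompanying unit test. `conv_hyperparams` would be built via
# hyperparams_builder.KerasLayerHyperparams from a Hyperparams proto, and the
# constants are arbitrary assumptions.
#
#   rfcn_predictor = RfcnKerasBoxPredictor(
#       is_training=False, num_classes=2, conv_hyperparams=conv_hyperparams,
#       freeze_batchnorm=False, num_spatial_bins=[3, 3], depth=4,
#       crop_size=[12, 12], box_code_size=4)
#   predictions = rfcn_predictor([image_features],
#                                proposal_boxes=proposal_boxes)
#   # predictions[BOX_ENCODINGS][0] has shape
#   #     [batch_size * num_proposals, 1, num_classes, box_code_size]
#   # predictions[CLASS_PREDICTIONS_WITH_BACKGROUND][0] has shape
#   #     [batch_size * num_proposals, 1, num_classes + 1]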
| 8,449 | 40.219512 | 80 | py |
models | models-master/research/object_detection/predictors/convolutional_keras_box_predictor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolutional Box Predictors with and without weight sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.core import box_predictor
from object_detection.utils import shape_utils
from object_detection.utils import static_shape
keras = tf.keras.layers
BOX_ENCODINGS = box_predictor.BOX_ENCODINGS
CLASS_PREDICTIONS_WITH_BACKGROUND = (
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND)
MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS
class _NoopVariableScope(object):
"""A dummy class that does not push any scope."""
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
class ConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor):
"""Convolutional Keras Box Predictor.
Optionally add an intermediate 1x1 convolutional layer after features and
predict in parallel branches box_encodings and
class_predictions_with_background.
Currently this box predictor assumes that predictions are "shared" across
classes --- that is each anchor makes box predictions which do not depend
on class.
"""
def __init__(self,
is_training,
num_classes,
box_prediction_heads,
class_prediction_heads,
other_heads,
conv_hyperparams,
num_layers_before_predictor,
min_depth,
max_depth,
freeze_batchnorm,
inplace_batchnorm_update,
name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
box_prediction_heads: A list of heads that predict the boxes.
class_prediction_heads: A list of heads that predict the classes.
other_heads: A dictionary mapping head names to lists of convolutional
heads.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
Raises:
ValueError: if min_depth > max_depth.
"""
super(ConvolutionalBoxPredictor, self).__init__(
is_training, num_classes, freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
name=name)
if min_depth > max_depth:
raise ValueError('min_depth should be less than or equal to max_depth')
if len(box_prediction_heads) != len(class_prediction_heads):
raise ValueError('All lists of heads must be the same length.')
for other_head_list in other_heads.values():
if len(box_prediction_heads) != len(other_head_list):
raise ValueError('All lists of heads must be the same length.')
self._prediction_heads = {
BOX_ENCODINGS: box_prediction_heads,
CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_heads,
}
if other_heads:
self._prediction_heads.update(other_heads)
# We generate a consistent ordering for the prediction head names,
# So that all workers build the model in the exact same order
self._sorted_head_names = sorted(self._prediction_heads.keys())
self._conv_hyperparams = conv_hyperparams
self._min_depth = min_depth
self._max_depth = max_depth
self._num_layers_before_predictor = num_layers_before_predictor
self._shared_nets = []
def build(self, input_shapes):
"""Creates the variables of the layer."""
if len(input_shapes) != len(self._prediction_heads[BOX_ENCODINGS]):
      raise ValueError('This box predictor was constructed with %d heads, '
'but there are %d inputs.' %
(len(self._prediction_heads[BOX_ENCODINGS]),
len(input_shapes)))
for stack_index, input_shape in enumerate(input_shapes):
net = []
# Add additional conv layers before the class predictor.
features_depth = static_shape.get_depth(input_shape)
depth = max(min(features_depth, self._max_depth), self._min_depth)
tf.logging.info(
'depth of additional conv before box predictor: {}'.format(depth))
if depth > 0 and self._num_layers_before_predictor > 0:
for i in range(self._num_layers_before_predictor):
net.append(keras.Conv2D(depth, [1, 1],
name='SharedConvolutions_%d/Conv2d_%d_1x1_%d'
% (stack_index, i, depth),
padding='SAME',
**self._conv_hyperparams.params()))
net.append(self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_norm'
% (stack_index, i, depth)))
net.append(self._conv_hyperparams.build_activation_layer(
name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_activation'
% (stack_index, i, depth),
))
# Until certain bugs are fixed in checkpointable lists,
# this net must be appended only once it's been filled with layers
self._shared_nets.append(net)
self.built = True
def _predict(self, image_features, **kwargs):
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
**kwargs: Unused Keyword args
Returns:
box_encodings: A list of float tensors of shape
[batch_size, num_anchors_i, q, code_size] representing the location of
the objects, where q is 1 or the number of classes. Each entry in the
list corresponds to a feature map in the input `image_features` list.
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
predictions = collections.defaultdict(list)
for (index, net) in enumerate(image_features):
# Apply shared conv layers before the head predictors.
for layer in self._shared_nets[index]:
net = layer(net)
for head_name in self._sorted_head_names:
head_obj = self._prediction_heads[head_name][index]
prediction = head_obj(net)
predictions[head_name].append(prediction)
return predictions
class WeightSharedConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor):
"""Convolutional Box Predictor with weight sharing based on Keras.
Defines the box predictor as defined in
https://arxiv.org/abs/1708.02002. This class differs from
ConvolutionalBoxPredictor in that it shares weights and biases while
predicting from different feature maps. However, batch_norm parameters are not
shared because the statistics of the activations vary among the different
feature maps.
Also note that separate multi-layer towers are constructed for the box
encoding and class predictors respectively.
"""
def __init__(self,
is_training,
num_classes,
box_prediction_head,
class_prediction_head,
other_heads,
conv_hyperparams,
depth,
num_layers_before_predictor,
freeze_batchnorm,
inplace_batchnorm_update,
kernel_size=3,
apply_batch_norm=False,
share_prediction_tower=False,
use_depthwise=False,
apply_conv_hyperparams_pointwise=False,
name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
box_prediction_head: The head that predicts the boxes.
class_prediction_head: The head that predicts the classes.
other_heads: A dictionary mapping head names to convolutional
head classes.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
kernel_size: Size of final convolution kernel.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
share_prediction_tower: Whether to share the multi-layer tower among box
prediction head, class prediction head and other heads.
use_depthwise: Whether to use depthwise separable conv2d instead of
regular conv2d.
apply_conv_hyperparams_pointwise: Whether to apply the conv_hyperparams to
the pointwise_initializer and pointwise_regularizer when using depthwise
separable convolutions. By default, conv_hyperparams are only applied to
the depthwise initializer and regularizer when use_depthwise is true.
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
"""
super(WeightSharedConvolutionalBoxPredictor, self).__init__(
is_training, num_classes, freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
name=name)
self._box_prediction_head = box_prediction_head
self._prediction_heads = {
CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_head,
}
if other_heads:
self._prediction_heads.update(other_heads)
# We generate a consistent ordering for the prediction head names,
# so that all workers build the model in the exact same order.
self._sorted_head_names = sorted(self._prediction_heads.keys())
self._conv_hyperparams = conv_hyperparams
self._depth = depth
self._num_layers_before_predictor = num_layers_before_predictor
self._kernel_size = kernel_size
self._apply_batch_norm = apply_batch_norm
self._share_prediction_tower = share_prediction_tower
self._use_depthwise = use_depthwise
self._apply_conv_hyperparams_pointwise = apply_conv_hyperparams_pointwise
# Additional projection layers to bring all feature maps to uniform
# channels.
self._additional_projection_layers = []
# The base tower layers for each head.
self._base_tower_layers_for_heads = {
BOX_ENCODINGS: [],
CLASS_PREDICTIONS_WITH_BACKGROUND: [],
}
for head_name in other_heads.keys():
self._base_tower_layers_for_heads[head_name] = []
# A dict maps the tower_name_scope of each head to the shared conv layers in
# the base tower for different feature map levels.
self._head_scope_conv_layers = {}
def _insert_additional_projection_layer(
self, inserted_layer_counter, target_channel):
projection_layers = []
if inserted_layer_counter >= 0:
use_bias = False if (self._apply_batch_norm and not
self._conv_hyperparams.force_use_bias()) else True
projection_layers.append(keras.Conv2D(
target_channel, [1, 1], strides=1, padding='SAME',
name='ProjectionLayer/conv2d_{}'.format(inserted_layer_counter),
**self._conv_hyperparams.params(use_bias=use_bias)))
if self._apply_batch_norm:
projection_layers.append(self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name='ProjectionLayer/conv2d_{}/BatchNorm'.format(
inserted_layer_counter)))
inserted_layer_counter += 1
return inserted_layer_counter, projection_layers
def _compute_base_tower(self, tower_name_scope, feature_index):
conv_layers = []
batch_norm_layers = []
activation_layers = []
use_bias = False if (self._apply_batch_norm and not
self._conv_hyperparams.force_use_bias()) else True
for additional_conv_layer_idx in range(self._num_layers_before_predictor):
layer_name = '{}/conv2d_{}'.format(
tower_name_scope, additional_conv_layer_idx)
if tower_name_scope not in self._head_scope_conv_layers:
if self._use_depthwise:
kwargs = self._conv_hyperparams.params(use_bias=use_bias)
# Both the regularizer and initializer apply to the depthwise layer,
# so we remap the kernel_* to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
if self._apply_conv_hyperparams_pointwise:
kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['pointwise_initializer'] = kwargs['kernel_initializer']
conv_layers.append(
tf.keras.layers.SeparableConv2D(
self._depth, [self._kernel_size, self._kernel_size],
padding='SAME',
name=layer_name,
**kwargs))
else:
conv_layers.append(
tf.keras.layers.Conv2D(
self._depth,
[self._kernel_size, self._kernel_size],
padding='SAME',
name=layer_name,
**self._conv_hyperparams.params(use_bias=use_bias)))
# Each feature gets a separate batchnorm parameter even though they share
# the same convolution weights.
if self._apply_batch_norm:
batch_norm_layers.append(self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name='{}/conv2d_{}/BatchNorm/feature_{}'.format(
tower_name_scope, additional_conv_layer_idx, feature_index)))
activation_layers.append(self._conv_hyperparams.build_activation_layer(
name='{}/conv2d_{}/activation_{}'.format(
tower_name_scope, additional_conv_layer_idx, feature_index)))
# Set conv layers as the shared conv layers for different feature maps with
# the same tower_name_scope.
if tower_name_scope in self._head_scope_conv_layers:
conv_layers = self._head_scope_conv_layers[tower_name_scope]
# Stack the base_tower_layers in the order of conv_layer, batch_norm_layer
# and activation_layer
base_tower_layers = []
for i in range(self._num_layers_before_predictor):
base_tower_layers.extend([conv_layers[i]])
if self._apply_batch_norm:
base_tower_layers.extend([batch_norm_layers[i]])
base_tower_layers.extend([activation_layers[i]])
return conv_layers, base_tower_layers
def build(self, input_shapes):
"""Creates the variables of the layer."""
feature_channels = [
shape_utils.get_dim_as_int(input_shape[3])
for input_shape in input_shapes
]
has_different_feature_channels = len(set(feature_channels)) > 1
if has_different_feature_channels:
inserted_layer_counter = 0
target_channel = max(set(feature_channels), key=feature_channels.count)
tf.logging.info('Not all feature maps have the same number of '
'channels, found: {}, appending additional projection '
'layers to bring all feature maps to uniformly have {} '
'channels.'.format(feature_channels, target_channel))
else:
# Place holder variables if has_different_feature_channels is False.
target_channel = -1
inserted_layer_counter = -1
def _build_layers(tower_name_scope, feature_index):
conv_layers, base_tower_layers = self._compute_base_tower(
tower_name_scope=tower_name_scope, feature_index=feature_index)
if tower_name_scope not in self._head_scope_conv_layers:
self._head_scope_conv_layers[tower_name_scope] = conv_layers
return base_tower_layers
for feature_index in range(len(input_shapes)):
# Additional projection layers should not be shared as input channels
# (and thus weight shapes) are different
inserted_layer_counter, projection_layers = (
self._insert_additional_projection_layer(
inserted_layer_counter, target_channel))
self._additional_projection_layers.append(projection_layers)
if self._share_prediction_tower:
box_tower_scope = 'PredictionTower'
else:
box_tower_scope = 'BoxPredictionTower'
# For box tower base
box_tower_layers = _build_layers(box_tower_scope, feature_index)
self._base_tower_layers_for_heads[BOX_ENCODINGS].append(box_tower_layers)
for head_name in self._sorted_head_names:
if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND:
tower_name_scope = 'ClassPredictionTower'
else:
tower_name_scope = '{}PredictionTower'.format(head_name)
box_tower_layers = _build_layers(tower_name_scope, feature_index)
self._base_tower_layers_for_heads[head_name].append(box_tower_layers)
self.built = True
def _predict(self, image_features, **kwargs):
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
**kwargs: Unused Keyword args
Returns:
box_encodings: A list of float tensors of shape
[batch_size, num_anchors_i, q, code_size] representing the location of
the objects, where q is 1 or the number of classes. Each entry in the
list corresponds to a feature map in the input `image_features` list.
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
predictions = collections.defaultdict(list)
def _apply_layers(base_tower_layers, image_feature):
for layer in base_tower_layers:
image_feature = layer(image_feature)
return image_feature
for (index, image_feature) in enumerate(image_features):
# Apply additional projection layers to image features
for layer in self._additional_projection_layers[index]:
image_feature = layer(image_feature)
# Apply box tower layers.
box_tower_feature = _apply_layers(
self._base_tower_layers_for_heads[BOX_ENCODINGS][index],
image_feature)
box_encodings = self._box_prediction_head(box_tower_feature)
predictions[BOX_ENCODINGS].append(box_encodings)
for head_name in self._sorted_head_names:
head_obj = self._prediction_heads[head_name]
if self._share_prediction_tower:
head_tower_feature = box_tower_feature
else:
head_tower_feature = _apply_layers(
self._base_tower_layers_for_heads[head_name][index],
image_feature)
prediction = head_obj(head_tower_feature)
predictions[head_name].append(prediction)
return predictions
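# Illustrative usage sketch (not part of the original file). These predictors
# are normally constructed through
# box_predictor_builder.build_convolutional_keras_box_predictor (see the unit
# tests). Calling the resulting predictor on a list of feature maps returns a
# dict of lists keyed by head name; for example, for a single
# [batch, 8, 8, channels] feature map with 5 anchors per location, the output
# shapes documented above are:
#   predictions[BOX_ENCODINGS][0]                     -> [batch, 320, 1, 4]
#   predictions[CLASS_PREDICTIONS_WITH_BACKGROUND][0] -> [batch, 320,
#                                                         num_classes + 1]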
| 22,079 | 43.606061 | 80 | py |
models | models-master/research/object_detection/predictors/mask_rcnn_keras_box_predictor_tf2_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.mask_rcnn_box_predictor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import mask_rcnn_keras_box_predictor as box_predictor
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MaskRCNNKerasBoxPredictorTest(test_case.TestCase):
def _build_hyperparams(self,
op_type=hyperparams_pb2.Hyperparams.FC):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.KerasLayerHyperparams(hyperparams)
def test_get_boxes_with_five_classes(self):
mask_box_predictor = (
box_predictor_builder.build_mask_rcnn_keras_box_predictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_hyperparams(),
freeze_batchnorm=False,
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
))
def graph_fn(image_features):
box_predictions = mask_box_predictor(
[image_features],
prediction_stage=2)
return (box_predictions[box_predictor.BOX_ENCODINGS],
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND])
image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)
(box_encodings,
class_predictions_with_background) = self.execute(graph_fn,
[image_features])
self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4])
self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6])
def test_get_boxes_with_five_classes_share_box_across_classes(self):
mask_box_predictor = (
box_predictor_builder.build_mask_rcnn_keras_box_predictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_hyperparams(),
freeze_batchnorm=False,
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
share_box_across_classes=True
))
def graph_fn(image_features):
box_predictions = mask_box_predictor(
[image_features],
prediction_stage=2)
return (box_predictions[box_predictor.BOX_ENCODINGS],
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND])
image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)
(box_encodings,
class_predictions_with_background) = self.execute(graph_fn,
[image_features])
self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4])
self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6])
def test_get_instance_masks(self):
mask_box_predictor = (
box_predictor_builder.build_mask_rcnn_keras_box_predictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_hyperparams(),
freeze_batchnorm=False,
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
conv_hyperparams=self._build_hyperparams(
op_type=hyperparams_pb2.Hyperparams.CONV),
predict_instance_masks=True))
def graph_fn(image_features):
box_predictions = mask_box_predictor(
[image_features],
prediction_stage=3)
return (box_predictions[box_predictor.MASK_PREDICTIONS],)
image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)
mask_predictions = self.execute(graph_fn, [image_features])
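    # Output follows [batch_size, 1, num_classes, mask_height, mask_width],
    # i.e. [2, 1, 5, 14, 14] with the default 14x14 mask resolution.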
self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14])
def test_do_not_return_instance_masks_without_request(self):
image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
mask_box_predictor = (
box_predictor_builder.build_mask_rcnn_keras_box_predictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_hyperparams(),
freeze_batchnorm=False,
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4))
box_predictions = mask_box_predictor(
[image_features],
prediction_stage=2)
self.assertEqual(len(box_predictions), 2)
self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)
self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND
in box_predictions)
if __name__ == '__main__':
tf.test.main()
| 5,742 | 38.606897 | 86 | py |
models | models-master/research/object_detection/predictors/convolutional_keras_box_predictor_tf2_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.convolutional_keras_box_predictor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import convolutional_keras_box_predictor as box_predictor
from object_detection.predictors.heads import keras_box_head
from object_detection.predictors.heads import keras_class_head
from object_detection.predictors.heads import keras_mask_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ConvolutionalKerasBoxPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_get_boxes_for_five_aspect_ratios_per_location(self):
conv_box_predictor = (
box_predictor_builder.build_convolutional_keras_box_predictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
))
def graph_fn(image_features):
box_predictions = conv_box_predictor([image_features])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
objectness_predictions = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, objectness_predictions)
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
(box_encodings, objectness_predictions) = self.execute(graph_fn,
[image_features])
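    # An 8x8 feature map with 5 boxes per location gives 8 * 8 * 5 = 320
    # predictions, hence the 320 in the expected shapes below.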
self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
self.assertAllEqual(objectness_predictions.shape, [4, 320, 1])
def test_get_boxes_for_one_aspect_ratio_per_location(self):
conv_box_predictor = (
box_predictor_builder.build_convolutional_keras_box_predictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[1],
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
))
def graph_fn(image_features):
box_predictions = conv_box_predictor([image_features])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
objectness_predictions = tf.concat(box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
return (box_encodings, objectness_predictions)
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
(box_encodings, objectness_predictions) = self.execute(graph_fn,
[image_features])
self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])
self.assertAllEqual(objectness_predictions.shape, [4, 64, 1])
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
self):
num_classes_without_background = 6
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
conv_box_predictor = (
box_predictor_builder.build_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
))
def graph_fn(image_features):
box_predictions = conv_box_predictor([image_features])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
(box_encodings,
class_predictions_with_background) = self.execute(graph_fn,
[image_features])
self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
self.assertAllEqual(class_predictions_with_background.shape,
[4, 320, num_classes_without_background+1])
def test_get_predictions_with_feature_maps_of_dynamic_shape(
self):
tf.keras.backend.clear_session()
conv_box_predictor = (
box_predictor_builder.build_convolutional_keras_box_predictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
))
variables = []
def graph_fn(image_features):
box_predictions = conv_box_predictor([image_features])
variables.extend(list(conv_box_predictor.variables))
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
objectness_predictions = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return box_encodings, objectness_predictions
resolution = 32
expected_num_anchors = resolution*resolution*5
box_encodings, objectness_predictions = self.execute(
graph_fn, [np.random.rand(4, resolution, resolution, 64)])
actual_variable_set = set([var.name.split(':')[0] for var in variables])
self.assertAllEqual(box_encodings.shape, [4, expected_num_anchors, 1, 4])
self.assertAllEqual(objectness_predictions.shape,
[4, expected_num_anchors, 1])
expected_variable_set = set([
'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias',
'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel',
'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',
'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',
'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',
'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])
self.assertEqual(expected_variable_set, actual_variable_set)
self.assertEqual(conv_box_predictor._sorted_head_names,
['box_encodings', 'class_predictions_with_background'])
def test_use_depthwise_convolution(self):
tf.keras.backend.clear_session()
conv_box_predictor = (
box_predictor_builder.build_convolutional_keras_box_predictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=3,
box_code_size=4,
use_depthwise=True
))
variables = []
def graph_fn(image_features):
box_predictions = conv_box_predictor([image_features])
variables.extend(list(conv_box_predictor.variables))
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
objectness_predictions = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return box_encodings, objectness_predictions
resolution = 32
expected_num_anchors = resolution*resolution*5
box_encodings, objectness_predictions = self.execute(
graph_fn, [np.random.rand(4, resolution, resolution, 64)])
actual_variable_set = set([var.name.split(':')[0] for var in variables])
self.assertAllEqual(box_encodings.shape, [4, expected_num_anchors, 1, 4])
self.assertAllEqual(objectness_predictions.shape,
[4, expected_num_anchors, 1])
expected_variable_set = set([
'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias',
'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel',
'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/'
'bias',
'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/'
'depthwise_kernel',
'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',
'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',
'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/bias',
'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/'
'depthwise_kernel',
'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',
'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])
self.assertEqual(expected_variable_set, actual_variable_set)
self.assertEqual(conv_box_predictor._sorted_head_names,
['box_encodings', 'class_predictions_with_background'])
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class WeightSharedConvolutionalKerasBoxPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self, add_batch_norm=True):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
"""
if add_batch_norm:
batch_norm_proto = """
batch_norm {
train: true,
}
"""
conv_hyperparams_text_proto += batch_norm_proto
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
# pylint: disable=line-too-long
def test_get_boxes_for_five_aspect_ratios_per_location(self):
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
depth=32,
num_layers_before_predictor=1,
box_code_size=4))
def graph_fn(image_features):
box_predictions = conv_box_predictor([image_features])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
objectness_predictions = tf.concat(box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
return (box_encodings, objectness_predictions)
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
(box_encodings, objectness_predictions) = self.execute(
graph_fn, [image_features])
self.assertAllEqual(box_encodings.shape, [4, 320, 4])
self.assertAllEqual(objectness_predictions.shape, [4, 320, 1])
def test_bias_predictions_to_background_with_sigmoid_score_conversion(self):
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=True,
num_classes=2,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
depth=32,
num_layers_before_predictor=1,
class_prediction_bias_init=-4.6,
box_code_size=4))
def graph_fn(image_features):
box_predictions = conv_box_predictor([image_features])
class_predictions = tf.concat(box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
return (tf.nn.sigmoid(class_predictions),)
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
class_predictions = self.execute(graph_fn, [image_features])
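    # With near-zero kernel weights and the class bias initialised to -4.6,
    # the logits are roughly -4.6 and sigmoid(-4.6) = 1 / (1 + e**4.6) ~ 0.010,
    # which is the mean checked below.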
self.assertAlmostEqual(np.mean(class_predictions), 0.01, places=3)
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
self):
num_classes_without_background = 6
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
depth=32,
num_layers_before_predictor=1,
box_code_size=4))
def graph_fn(image_features):
box_predictions = conv_box_predictor([image_features])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
return (box_encodings, class_predictions_with_background)
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
(box_encodings, class_predictions_with_background) = self.execute(
graph_fn, [image_features])
self.assertAllEqual(box_encodings.shape, [4, 320, 4])
self.assertAllEqual(class_predictions_with_background.shape,
[4, 320, num_classes_without_background+1])
def test_get_multi_class_predictions_from_two_feature_maps(
self):
num_classes_without_background = 6
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5, 5],
depth=32,
num_layers_before_predictor=1,
box_code_size=4))
def graph_fn(image_features1, image_features2):
box_predictions = conv_box_predictor([image_features1, image_features2])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32)
image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32)
(box_encodings, class_predictions_with_background) = self.execute(
graph_fn, [image_features1, image_features2])
self.assertAllEqual(box_encodings.shape, [4, 640, 4])
self.assertAllEqual(class_predictions_with_background.shape,
[4, 640, num_classes_without_background+1])
def test_get_multi_class_predictions_from_feature_maps_of_different_depth(
self):
num_classes_without_background = 6
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5, 5, 5],
depth=32,
num_layers_before_predictor=1,
box_code_size=4))
def graph_fn(image_features1, image_features2, image_features3):
box_predictions = conv_box_predictor(
[image_features1, image_features2, image_features3])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32)
image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32)
image_features3 = np.random.rand(4, 8, 8, 32).astype(np.float32)
(box_encodings, class_predictions_with_background) = self.execute(
graph_fn, [image_features1, image_features2, image_features3])
self.assertAllEqual(box_encodings.shape, [4, 960, 4])
self.assertAllEqual(class_predictions_with_background.shape,
[4, 960, num_classes_without_background+1])
def test_predictions_multiple_feature_maps_share_weights_separate_batchnorm(
self):
tf.keras.backend.clear_session()
num_classes_without_background = 6
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5, 5],
depth=32,
num_layers_before_predictor=2,
box_code_size=4))
variables = []
def graph_fn(image_features1, image_features2):
box_predictions = conv_box_predictor([image_features1, image_features2])
variables.extend(list(conv_box_predictor.variables))
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
self.execute(graph_fn, [
np.random.rand(4, 32, 32, 3).astype(np.float32),
np.random.rand(4, 16, 16, 3).astype(np.float32)
])
actual_variable_set = set([var.name.split(':')[0] for var in variables])
expected_variable_set = set([
# Box prediction tower
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'),
# Box prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
# Class prediction tower
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'),
# Class prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
self.assertEqual(expected_variable_set, actual_variable_set)
def test_predictions_multiple_feature_maps_share_weights_without_batchnorm(
self):
tf.keras.backend.clear_session()
num_classes_without_background = 6
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5, 5],
depth=32,
num_layers_before_predictor=2,
box_code_size=4,
apply_batch_norm=False))
variables = []
def graph_fn(image_features1, image_features2):
box_predictions = conv_box_predictor([image_features1, image_features2])
variables.extend(list(conv_box_predictor.variables))
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
self.execute(graph_fn, [
np.random.rand(4, 32, 32, 3).astype(np.float32),
np.random.rand(4, 16, 16, 3).astype(np.float32)
])
actual_variable_set = set([var.name.split(':')[0] for var in variables])
expected_variable_set = set([
# Box prediction tower
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/bias'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/bias'),
# Box prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
# Class prediction tower
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/bias'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/bias'),
# Class prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
self.assertEqual(expected_variable_set, actual_variable_set)
def test_predictions_multiple_feature_maps_share_weights_with_depthwise(
self):
tf.keras.backend.clear_session()
num_classes_without_background = 6
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5, 5],
depth=32,
num_layers_before_predictor=2,
box_code_size=4,
apply_batch_norm=False,
use_depthwise=True))
variables = []
def graph_fn(image_features1, image_features2):
box_predictions = conv_box_predictor([image_features1, image_features2])
variables.extend(list(conv_box_predictor.variables))
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
self.execute(graph_fn, [
np.random.rand(4, 32, 32, 3).astype(np.float32),
np.random.rand(4, 16, 16, 3).astype(np.float32)
])
actual_variable_set = set([var.name.split(':')[0] for var in variables])
expected_variable_set = set([
# Box prediction tower
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/depthwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/pointwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/bias'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/depthwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/pointwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/bias'),
# Box prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/depthwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/pointwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
# Class prediction tower
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/depthwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/pointwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/bias'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/depthwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/pointwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/bias'),
# Class prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/depthwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/pointwise_kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
self.assertEqual(expected_variable_set, actual_variable_set)
def test_no_batchnorm_params_when_batchnorm_is_not_configured(self):
tf.keras.backend.clear_session()
num_classes_without_background = 6
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5, 5],
depth=32,
num_layers_before_predictor=2,
box_code_size=4,
apply_batch_norm=False))
variables = []
def graph_fn(image_features1, image_features2):
box_predictions = conv_box_predictor(
[image_features1, image_features2])
variables.extend(list(conv_box_predictor.variables))
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
self.execute(graph_fn, [
np.random.rand(4, 32, 32, 3).astype(np.float32),
np.random.rand(4, 16, 16, 3).astype(np.float32)
])
actual_variable_set = set([var.name.split(':')[0] for var in variables])
expected_variable_set = set([
# Box prediction tower
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_0/bias'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'BoxPredictionTower/conv2d_1/bias'),
# Box prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
# Class prediction tower
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_0/bias'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'ClassPredictionTower/conv2d_1/bias'),
# Class prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
self.assertEqual(expected_variable_set, actual_variable_set)
def test_predictions_share_weights_share_tower_separate_batchnorm(
self):
tf.keras.backend.clear_session()
num_classes_without_background = 6
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5, 5],
depth=32,
num_layers_before_predictor=2,
box_code_size=4,
share_prediction_tower=True))
variables = []
def graph_fn(image_features1, image_features2):
box_predictions = conv_box_predictor(
[image_features1, image_features2])
variables.extend(list(conv_box_predictor.variables))
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
self.execute(graph_fn, [
np.random.rand(4, 32, 32, 3).astype(np.float32),
np.random.rand(4, 16, 16, 3).astype(np.float32)
])
actual_variable_set = set([var.name.split(':')[0] for var in variables])
expected_variable_set = set([
# Shared prediction tower
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_0/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_0/BatchNorm/feature_0/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_0/BatchNorm/feature_1/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_1/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_1/BatchNorm/feature_0/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_1/BatchNorm/feature_1/beta'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'),
# Box prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
# Class prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
self.assertEqual(expected_variable_set, actual_variable_set)
def test_predictions_share_weights_share_tower_without_batchnorm(
self):
tf.keras.backend.clear_session()
num_classes_without_background = 6
conv_box_predictor = (
box_predictor_builder
.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5, 5],
depth=32,
num_layers_before_predictor=2,
box_code_size=4,
share_prediction_tower=True,
apply_batch_norm=False))
variables = []
def graph_fn(image_features1, image_features2):
box_predictions = conv_box_predictor(
[image_features1, image_features2])
variables.extend(list(conv_box_predictor.variables))
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
self.execute(graph_fn, [
np.random.rand(4, 32, 32, 3).astype(np.float32),
np.random.rand(4, 16, 16, 3).astype(np.float32)
])
actual_variable_set = set([var.name.split(':')[0] for var in variables])
expected_variable_set = set([
# Shared prediction tower
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_0/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_0/bias'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_1/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'PredictionTower/conv2d_1/bias'),
# Box prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
# Class prediction head
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'),
('WeightSharedConvolutionalBoxPredictor/'
'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
self.assertEqual(expected_variable_set, actual_variable_set)
def test_other_heads_predictions(self):
box_code_size = 4
num_classes_without_background = 3
other_head_name = 'Mask'
mask_height = 5
mask_width = 5
num_predictions_per_location = 5
box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
box_code_size=box_code_size,
conv_hyperparams=self._build_conv_hyperparams(),
num_predictions_per_location=num_predictions_per_location)
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=num_classes_without_background + 1,
conv_hyperparams=self._build_conv_hyperparams(),
num_predictions_per_location=num_predictions_per_location)
other_heads = {
other_head_name:
keras_mask_head.WeightSharedConvolutionalMaskHead(
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
num_predictions_per_location=num_predictions_per_location,
mask_height=mask_height,
mask_width=mask_width)
}
conv_box_predictor = box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=False,
num_classes=num_classes_without_background,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
depth=32,
num_layers_before_predictor=2)
def graph_fn(image_features):
box_predictions = conv_box_predictor([image_features])
for key, value in box_predictions.items():
box_predictions[key] = tf.concat(value, axis=1)
assert len(box_predictions) == 3
return (box_predictions[box_predictor.BOX_ENCODINGS],
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
box_predictions[other_head_name])
batch_size = 4
feature_ht = 8
feature_wt = 8
image_features = np.random.rand(batch_size, feature_ht, feature_wt,
64).astype(np.float32)
(box_encodings, class_predictions, other_head_predictions) = self.execute(
graph_fn, [image_features])
num_anchors = feature_ht * feature_wt * num_predictions_per_location
self.assertAllEqual(box_encodings.shape,
[batch_size, num_anchors, box_code_size])
self.assertAllEqual(
class_predictions.shape,
[batch_size, num_anchors, num_classes_without_background + 1])
self.assertAllEqual(other_head_predictions.shape, [
batch_size, num_anchors, num_classes_without_background, mask_height,
mask_width
])
if __name__ == '__main__':
tf.test.main()
| 42,980 | 44.100735 | 90 | py |
models | models-master/research/object_detection/predictors/heads/keras_mask_head.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Mask Heads.
Contains Mask prediction head classes for different meta architectures.
All the mask prediction heads have a predict function that receives the
`features` as the first argument and returns `mask_predictions`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.predictors.heads import head
from object_detection.utils import ops
from object_detection.utils import shape_utils
class ConvolutionalMaskHead(head.KerasHead):
"""Convolutional class prediction head."""
def __init__(self,
is_training,
num_classes,
use_dropout,
dropout_keep_prob,
kernel_size,
num_predictions_per_location,
conv_hyperparams,
freeze_batchnorm,
use_depthwise=False,
mask_height=7,
mask_width=7,
masks_are_class_agnostic=False,
name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: Number of classes.
      use_dropout: Option to use dropout or not. Note that a single dropout
        op is applied to the image features prior to the mask predictions.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
num_predictions_per_location: Number of box predictions to be made per
spatial location. Int specifying number of boxes per location.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
mask_height: Desired output mask height. The default value is 7.
mask_width: Desired output mask width. The default value is 7.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
    """
super(ConvolutionalMaskHead, self).__init__(name=name)
self._is_training = is_training
self._num_classes = num_classes
self._use_dropout = use_dropout
self._dropout_keep_prob = dropout_keep_prob
self._kernel_size = kernel_size
self._num_predictions_per_location = num_predictions_per_location
self._use_depthwise = use_depthwise
self._mask_height = mask_height
self._mask_width = mask_width
self._masks_are_class_agnostic = masks_are_class_agnostic
self._mask_predictor_layers = []
    # Class-agnostic heads predict a single mask; otherwise the head predicts
    # one mask per class.
if self._masks_are_class_agnostic:
self._num_masks = 1
else:
self._num_masks = self._num_classes
num_mask_channels = self._num_masks * self._mask_height * self._mask_width
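    # For example (hypothetical numbers): 90 classes with 7x7 masks require
    # 90 * 7 * 7 = 4410 output channels per anchor before the reshape in
    # _predict.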
if self._use_dropout:
self._mask_predictor_layers.append(
# The Dropout layer's `training` parameter for the call method must
# be set implicitly by the Keras set_learning_phase. The object
# detection training code takes care of this.
tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob))
if self._use_depthwise:
self._mask_predictor_layers.append(
tf.keras.layers.DepthwiseConv2D(
[self._kernel_size, self._kernel_size],
padding='SAME',
depth_multiplier=1,
strides=1,
dilation_rate=1,
name='MaskPredictor_depthwise',
**conv_hyperparams.params()))
self._mask_predictor_layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name='MaskPredictor_depthwise_batchnorm'))
self._mask_predictor_layers.append(
conv_hyperparams.build_activation_layer(
name='MaskPredictor_depthwise_activation'))
self._mask_predictor_layers.append(
tf.keras.layers.Conv2D(
num_predictions_per_location * num_mask_channels, [1, 1],
name='MaskPredictor',
**conv_hyperparams.params(use_bias=True)))
else:
self._mask_predictor_layers.append(
tf.keras.layers.Conv2D(
num_predictions_per_location * num_mask_channels,
[self._kernel_size, self._kernel_size],
padding='SAME',
name='MaskPredictor',
**conv_hyperparams.params(use_bias=True)))
def _predict(self, features):
"""Predicts boxes.
Args:
features: A float tensor of shape [batch_size, height, width, channels]
containing image features.
Returns:
      mask_predictions: A float tensor of shape
[batch_size, num_anchors, num_masks, mask_height, mask_width]
representing the mask predictions for the proposals.
"""
mask_predictions = features
for layer in self._mask_predictor_layers:
mask_predictions = layer(mask_predictions)
batch_size = features.get_shape().as_list()[0]
if batch_size is None:
batch_size = tf.shape(features)[0]
mask_predictions = tf.reshape(
mask_predictions,
[batch_size, -1, self._num_masks, self._mask_height, self._mask_width])
return mask_predictions
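# Example usage (illustrative sketch; the concrete numbers, the feature tensor
# and the `conv_hyperparams` object are assumptions, mirroring how the unit
# tests build a KerasLayerHyperparams through hyperparams_builder):
#
#   mask_head = ConvolutionalMaskHead(
#       is_training=False, num_classes=3, use_dropout=False,
#       dropout_keep_prob=1.0, kernel_size=3, num_predictions_per_location=5,
#       conv_hyperparams=conv_hyperparams, freeze_batchnorm=False)
#   features = tf.random_uniform([2, 8, 8, 64])
#   masks = mask_head(features)  # shape [2, 8 * 8 * 5, 3, 7, 7]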
class MaskRCNNMaskHead(head.KerasHead):
"""Mask RCNN mask prediction head.
This is a piece of Mask RCNN which is responsible for predicting
just the pixelwise foreground scores for regions within the boxes.
Please refer to Mask RCNN paper:
https://arxiv.org/abs/1703.06870
"""
def __init__(self,
is_training,
num_classes,
freeze_batchnorm,
conv_hyperparams,
mask_height=14,
mask_width=14,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=False,
convolve_then_upsample=False,
name=None):
"""Constructor.
Args:
is_training: Indicates whether the Mask head is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
mask_height: Desired output mask height. The default value is 14.
mask_width: Desired output mask width. The default value is 14.
mask_prediction_num_conv_layers: Number of convolution layers applied to
the image_features in mask prediction branch.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
applied to the image_features in the mask prediction branch. If set
to 0, the depth of the convolution layers will be automatically chosen
based on the number of object classes and the number of channels in the
image features.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
convolve_then_upsample: Whether to apply convolutions on mask features
before upsampling using nearest neighbor resizing. Otherwise, mask
features are resized to [`mask_height`, `mask_width`] using bilinear
resizing before applying convolutions.
name: A string name scope to assign to the mask head. If `None`, Keras
will auto-generate one from the class name.
"""
super(MaskRCNNMaskHead, self).__init__(name=name)
self._is_training = is_training
self._freeze_batchnorm = freeze_batchnorm
self._num_classes = num_classes
self._conv_hyperparams = conv_hyperparams
self._mask_height = mask_height
self._mask_width = mask_width
self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers
self._mask_prediction_conv_depth = mask_prediction_conv_depth
self._masks_are_class_agnostic = masks_are_class_agnostic
self._convolve_then_upsample = convolve_then_upsample
self._mask_predictor_layers = []
def build(self, input_shapes):
num_conv_channels = self._mask_prediction_conv_depth
if num_conv_channels == 0:
num_feature_channels = input_shapes.as_list()[3]
num_conv_channels = self._get_mask_predictor_conv_depth(
num_feature_channels, self._num_classes)
for i in range(self._mask_prediction_num_conv_layers - 1):
self._mask_predictor_layers.append(
tf.keras.layers.Conv2D(
num_conv_channels,
[3, 3],
padding='SAME',
name='MaskPredictor_conv2d_{}'.format(i),
**self._conv_hyperparams.params()))
self._mask_predictor_layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name='MaskPredictor_batchnorm_{}'.format(i)))
self._mask_predictor_layers.append(
self._conv_hyperparams.build_activation_layer(
name='MaskPredictor_activation_{}'.format(i)))
if self._convolve_then_upsample:
# Replace Transposed Convolution with a Nearest Neighbor upsampling step
# followed by 3x3 convolution.
height_scale = self._mask_height // shape_utils.get_dim_as_int(
input_shapes[1])
width_scale = self._mask_width // shape_utils.get_dim_as_int(
input_shapes[2])
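      # For example (hypothetical shapes): 7x7 cropped features upsampled to
      # 14x14 masks give height_scale = width_scale = 2.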
# pylint: disable=g-long-lambda
self._mask_predictor_layers.append(tf.keras.layers.Lambda(
lambda features: ops.nearest_neighbor_upsampling(
features, height_scale=height_scale, width_scale=width_scale)
))
# pylint: enable=g-long-lambda
self._mask_predictor_layers.append(
tf.keras.layers.Conv2D(
num_conv_channels,
[3, 3],
padding='SAME',
name='MaskPredictor_upsample_conv2d',
**self._conv_hyperparams.params()))
self._mask_predictor_layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name='MaskPredictor_upsample_batchnorm'))
self._mask_predictor_layers.append(
self._conv_hyperparams.build_activation_layer(
name='MaskPredictor_upsample_activation'))
num_masks = 1 if self._masks_are_class_agnostic else self._num_classes
self._mask_predictor_layers.append(
tf.keras.layers.Conv2D(
num_masks,
[3, 3],
padding='SAME',
name='MaskPredictor_last_conv2d',
**self._conv_hyperparams.params(use_bias=True)))
self.built = True
def _get_mask_predictor_conv_depth(self,
num_feature_channels,
num_classes,
class_weight=3.0,
feature_weight=2.0):
"""Computes the depth of the mask predictor convolutions.
Computes the depth of the mask predictor convolutions given feature channels
and number of classes by performing a weighted average of the two in
log space to compute the number of convolution channels. The weights that
are used for computing the weighted average do not need to sum to 1.
Args:
num_feature_channels: An integer containing the number of feature
channels.
num_classes: An integer containing the number of classes.
class_weight: Class weight used in computing the weighted average.
feature_weight: Feature weight used in computing the weighted average.
Returns:
An integer containing the number of convolution channels used by mask
predictor.
"""
num_feature_channels_log = math.log(float(num_feature_channels), 2.0)
num_classes_log = math.log(float(num_classes), 2.0)
weighted_num_feature_channels_log = (
num_feature_channels_log * feature_weight)
weighted_num_classes_log = num_classes_log * class_weight
total_weight = feature_weight + class_weight
num_conv_channels_log = round(
(weighted_num_feature_channels_log + weighted_num_classes_log) /
total_weight)
return int(math.pow(2.0, num_conv_channels_log))
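  # Worked example for _get_mask_predictor_conv_depth (hypothetical inputs):
  # with 256 feature channels and 90 classes, log2(256) = 8.0 and
  # log2(90) ~ 6.49, so the weighted average in log space is
  # (2.0 * 8.0 + 3.0 * 6.49) / 5.0 ~ 7.1, which rounds to 7 and yields
  # 2**7 = 128 convolution channels.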
def _predict(self, features):
"""Predicts pixelwise foreground scores for regions within the boxes.
Args:
features: A float tensor of shape [batch_size, height, width, channels]
containing features for a batch of images.
Returns:
instance_masks: A float tensor of shape
[batch_size, 1, num_classes, mask_height, mask_width].
"""
if not self._convolve_then_upsample:
features = tf.image.resize_bilinear(
features, [self._mask_height, self._mask_width],
align_corners=True)
mask_predictions = features
for layer in self._mask_predictor_layers:
mask_predictions = layer(mask_predictions)
return tf.expand_dims(
tf.transpose(mask_predictions, perm=[0, 3, 1, 2]),
axis=1,
name='MaskPredictor')
class WeightSharedConvolutionalMaskHead(head.KerasHead):
"""Weight shared convolutional mask prediction head based on Keras."""
def __init__(self,
num_classes,
num_predictions_per_location,
conv_hyperparams,
kernel_size=3,
use_dropout=False,
dropout_keep_prob=0.8,
mask_height=7,
mask_width=7,
masks_are_class_agnostic=False,
name=None):
"""Constructor.
Args:
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
num_predictions_per_location: Number of box predictions to be made per
spatial location. Int specifying number of boxes per location.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
kernel_size: Size of final convolution kernel.
      use_dropout: Whether to apply dropout to the mask prediction head.
      dropout_keep_prob: Probability of keeping activations.
mask_height: Desired output mask height. The default value is 7.
mask_width: Desired output mask width. The default value is 7.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
    """
super(WeightSharedConvolutionalMaskHead, self).__init__(name=name)
self._num_classes = num_classes
self._num_predictions_per_location = num_predictions_per_location
self._kernel_size = kernel_size
self._use_dropout = use_dropout
self._dropout_keep_prob = dropout_keep_prob
self._mask_height = mask_height
self._mask_width = mask_width
self._masks_are_class_agnostic = masks_are_class_agnostic
self._mask_predictor_layers = []
if self._masks_are_class_agnostic:
self._num_masks = 1
else:
self._num_masks = self._num_classes
num_mask_channels = self._num_masks * self._mask_height * self._mask_width
if self._use_dropout:
self._mask_predictor_layers.append(
tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob))
self._mask_predictor_layers.append(
tf.keras.layers.Conv2D(
num_predictions_per_location * num_mask_channels,
[self._kernel_size, self._kernel_size],
padding='SAME',
name='MaskPredictor',
**conv_hyperparams.params(use_bias=True)))
def _predict(self, features):
"""Predicts boxes.
Args:
features: A float tensor of shape [batch_size, height, width, channels]
containing image features.
Returns:
mask_predictions: A tensor of shape
        [batch_size, num_anchors, num_masks, mask_height, mask_width]
representing the mask predictions for the proposals.
"""
mask_predictions = features
for layer in self._mask_predictor_layers:
mask_predictions = layer(mask_predictions)
batch_size = features.get_shape().as_list()[0]
if batch_size is None:
batch_size = tf.shape(features)[0]
mask_predictions = tf.reshape(
mask_predictions,
[batch_size, -1, self._num_masks, self._mask_height, self._mask_width])
return mask_predictions
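# Example usage (illustrative sketch with assumed shapes): because the layer
# owns a single set of convolution weights, one instance can be applied to
# every level of a feature pyramid:
#
#   mask_head = WeightSharedConvolutionalMaskHead(
#       num_classes=3, num_predictions_per_location=5,
#       conv_hyperparams=conv_hyperparams)
#   masks_fine = mask_head(features_8x8)    # [batch, 8 * 8 * 5, 3, 7, 7]
#   masks_coarse = mask_head(features_4x4)  # [batch, 4 * 4 * 5, 3, 7, 7]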
| 18,622 | 40.662192 | 80 | py |
models | models-master/research/object_detection/predictors/heads/keras_class_head_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.heads.class_head."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_class_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ConvolutionalKerasClassPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.ConvolutionalClassHead(
is_training=True,
num_class_slots=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=False)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature,)
return class_predictions
class_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20], class_predictions.shape)
def test_prediction_size_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.ConvolutionalClassHead(
is_training=True,
num_class_slots=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=True)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature,)
return class_predictions
class_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20], class_predictions.shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MaskRCNNClassHeadTest(test_case.TestCase):
def _build_fc_hyperparams(self,
op_type=hyperparams_pb2.Hyperparams.FC):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.KerasLayerHyperparams(hyperparams)
def test_prediction_size(self):
class_prediction_head = keras_class_head.MaskRCNNClassHead(
is_training=False,
num_class_slots=20,
fc_hyperparams=self._build_fc_hyperparams(),
freeze_batchnorm=False,
use_dropout=True,
dropout_keep_prob=0.5)
def graph_fn():
roi_pooled_features = tf.random_uniform(
[64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
prediction = class_prediction_head(roi_pooled_features)
return prediction
prediction = self.execute(graph_fn, [])
self.assertAllEqual([64, 1, 20], prediction.shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class WeightSharedConvolutionalKerasClassPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=False)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature)
return class_predictions
class_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20], class_predictions.shape)
def test_prediction_size_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=True)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature)
return class_predictions
class_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20], class_predictions.shape)
def test_variable_count_depth_wise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = (
keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=True))
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_prediction_head(image_feature)
self.assertEqual(len(class_prediction_head.variables), 3)
def test_variable_count_depth_wise_False(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = (
keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=False))
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_prediction_head(image_feature)
self.assertEqual(len(class_prediction_head.variables), 2)
def test_softmax_score_converter(self):
num_class_slots = 10
batch_size = 2
height = 17
width = 19
num_predictions_per_location = 2
assert num_predictions_per_location != 1
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=num_class_slots,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
score_converter_fn=tf.nn.softmax)
def graph_fn():
image_feature = tf.random_uniform([batch_size, height, width, 1024],
minval=-10.0,
maxval=10.0,
dtype=tf.float32)
class_predictions = class_prediction_head(image_feature)
return class_predictions
class_predictions_out = self.execute(graph_fn, [])
class_predictions_sum = np.sum(class_predictions_out, axis=-1)
num_anchors = height * width * num_predictions_per_location
exp_class_predictions_sum = np.ones((batch_size, num_anchors),
dtype=np.float32)
self.assertAllEqual((batch_size, num_anchors, num_class_slots),
class_predictions_out.shape)
self.assertAllClose(class_predictions_sum, exp_class_predictions_sum)
if __name__ == '__main__':
tf.test.main()
| 9,034 | 37.122363 | 80 | py |
models | models-master/research/object_detection/predictors/heads/keras_box_head_tf2_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.heads.box_head."""
import unittest
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_box_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ConvolutionalKerasBoxHeadTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
box_prediction_head = keras_box_head.ConvolutionalBoxHead(
is_training=True,
box_code_size=4,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=False)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
box_encodings = box_prediction_head(image_feature)
return box_encodings
box_encodings = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 1, 4], box_encodings.shape)
def test_prediction_size_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
box_prediction_head = keras_box_head.ConvolutionalBoxHead(
is_training=True,
box_code_size=4,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=True)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
box_encodings = box_prediction_head(image_feature)
return box_encodings
box_encodings = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 1, 4], box_encodings.shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MaskRCNNKerasBoxHeadTest(test_case.TestCase):
def _build_fc_hyperparams(
self, op_type=hyperparams_pb2.Hyperparams.FC):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.KerasLayerHyperparams(hyperparams)
def test_prediction_size(self):
box_prediction_head = keras_box_head.MaskRCNNBoxHead(
is_training=False,
num_classes=20,
fc_hyperparams=self._build_fc_hyperparams(),
freeze_batchnorm=False,
use_dropout=True,
dropout_keep_prob=0.5,
box_code_size=4,
share_box_across_classes=False)
def graph_fn():
roi_pooled_features = tf.random_uniform(
[64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
prediction = box_prediction_head(roi_pooled_features)
return prediction
prediction = self.execute(graph_fn, [])
self.assertAllEqual([64, 1, 20, 4], prediction.shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class WeightSharedConvolutionalKerasBoxHead(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
box_code_size=4,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=False)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
box_encodings = box_prediction_head(image_feature)
return box_encodings
box_encodings = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 4], box_encodings.shape)
def test_prediction_size_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
box_code_size=4,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=True)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
box_encodings = box_prediction_head(image_feature)
return box_encodings
box_encodings = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 4], box_encodings.shape)
def test_variable_count_depth_wise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
box_code_size=4,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=True)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
box_prediction_head(image_feature)
self.assertEqual(len(box_prediction_head.variables), 3)
def test_variable_count_depth_wise_False(self):
conv_hyperparams = self._build_conv_hyperparams()
box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
box_code_size=4,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=False)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
box_prediction_head(image_feature)
self.assertEqual(len(box_prediction_head.variables), 2)
if __name__ == '__main__':
tf.test.main()
| 7,322 | 35.615 | 80 | py |
models | models-master/research/object_detection/predictors/heads/keras_mask_head_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.heads.mask_head."""
import unittest
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_mask_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ConvolutionalMaskPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_use_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
mask_prediction_head = keras_mask_head.ConvolutionalMaskHead(
is_training=True,
num_classes=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=False,
mask_height=7,
mask_width=7)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
mask_predictions = mask_prediction_head(image_feature)
return mask_predictions
mask_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.shape)
def test_prediction_size_use_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
mask_prediction_head = keras_mask_head.ConvolutionalMaskHead(
is_training=True,
num_classes=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=True,
mask_height=7,
mask_width=7)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
mask_predictions = mask_prediction_head(image_feature)
return mask_predictions
mask_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.shape)
def test_class_agnostic_prediction_size_use_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
mask_prediction_head = keras_mask_head.ConvolutionalMaskHead(
is_training=True,
num_classes=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=False,
mask_height=7,
mask_width=7,
masks_are_class_agnostic=True)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
mask_predictions = mask_prediction_head(image_feature)
return mask_predictions
mask_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape)
def test_class_agnostic_prediction_size_use_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
mask_prediction_head = keras_mask_head.ConvolutionalMaskHead(
is_training=True,
num_classes=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=True,
mask_height=7,
mask_width=7,
masks_are_class_agnostic=True)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
mask_predictions = mask_prediction_head(image_feature)
return mask_predictions
mask_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MaskRCNNMaskHeadTest(test_case.TestCase):
def _build_conv_hyperparams(self,
op_type=hyperparams_pb2.Hyperparams.CONV):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.KerasLayerHyperparams(hyperparams)
def test_prediction_size(self):
mask_prediction_head = keras_mask_head.MaskRCNNMaskHead(
is_training=True,
num_classes=20,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
mask_height=14,
mask_width=14,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=False)
def graph_fn():
roi_pooled_features = tf.random_uniform(
[64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
prediction = mask_prediction_head(roi_pooled_features)
return prediction
prediction = self.execute(graph_fn, [])
self.assertAllEqual([64, 1, 20, 14, 14], prediction.shape)
def test_prediction_size_with_convolve_then_upsample(self):
mask_prediction_head = keras_mask_head.MaskRCNNMaskHead(
is_training=True,
num_classes=20,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
mask_height=28,
mask_width=28,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=True,
convolve_then_upsample=True)
def graph_fn():
roi_pooled_features = tf.random_uniform(
[64, 14, 14, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
prediction = mask_prediction_head(roi_pooled_features)
return prediction
prediction = self.execute(graph_fn, [])
self.assertAllEqual([64, 1, 1, 28, 28], prediction.shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class WeightSharedConvolutionalMaskPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size(self):
mask_prediction_head = (
keras_mask_head.WeightSharedConvolutionalMaskHead(
num_classes=20,
num_predictions_per_location=1,
conv_hyperparams=self._build_conv_hyperparams(),
mask_height=7,
mask_width=7))
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
mask_predictions = mask_prediction_head(image_feature)
return mask_predictions
mask_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.shape)
def test_class_agnostic_prediction_size(self):
mask_prediction_head = (
keras_mask_head.WeightSharedConvolutionalMaskHead(
num_classes=20,
num_predictions_per_location=1,
conv_hyperparams=self._build_conv_hyperparams(),
mask_height=7,
mask_width=7,
masks_are_class_agnostic=True))
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
mask_predictions = mask_prediction_head(image_feature)
return mask_predictions
mask_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape)
if __name__ == '__main__':
tf.test.main()
| 9,295 | 35.743083 | 80 | py |
models | models-master/research/object_detection/predictors/heads/head.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base head class.
All the different kinds of prediction heads in different models will inherit
from this class. What is in common between all head classes is that they have a
`predict` function that receives `features` as its first argument.
How to add a new prediction head to an existing meta architecture?
For example, how can we add a `3d shape` prediction head to Mask RCNN?
We have to take the following steps to add a new prediction head to an
existing meta arch:
(a) Add a class for predicting the head. This class should inherit from the
`Head` class below and have a `predict` function that receives the features
and predicts the output. The output is always a tf.float32 tensor.
(b) Add the head to the meta architecture. For example in case of Mask RCNN,
go to box_predictor_builder and put in the logic for adding the new head to the
Mask RCNN box predictor.
(c) Add the logic for computing the loss for the new head.
(d) Add the necessary metrics for the new head.
(e) (optional) Add visualization for the new head.
"""
from abc import abstractmethod
import tensorflow.compat.v1 as tf
class Head(object):
"""Mask RCNN head base class."""
def __init__(self):
"""Constructor."""
pass
@abstractmethod
def predict(self, features, num_predictions_per_location):
"""Returns the head's predictions.
Args:
features: A float tensor of features.
num_predictions_per_location: Int containing number of predictions per
location.
Returns:
A tf.float32 tensor.
"""
pass
class KerasHead(tf.keras.layers.Layer):
"""Keras head base class."""
def call(self, features):
"""The Keras model call will delegate to the `_predict` method."""
return self._predict(features)
@abstractmethod
def _predict(self, features):
"""Returns the head's predictions.
Args:
features: A float tensor of features.
Returns:
A tf.float32 tensor.
"""
pass
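# Illustrative sketch (not part of the library): a minimal custom Keras head
# following the recipe in the module docstring above. The class name, the
# single 1x1 convolution, and the encoding size are hypothetical choices for
# the example, not an existing head in this package.
class ExampleShapeHead(KerasHead):
  """Hypothetical head emitting a fixed-size encoding per spatial location."""
  def __init__(self, encoding_size, num_predictions_per_location, name=None):
    super(ExampleShapeHead, self).__init__(name=name)
    self._encoding_size = encoding_size
    # A real head would typically build its layers from a
    # `hyperparams_builder.KerasLayerHyperparams` object instead.
    self._conv = tf.keras.layers.Conv2D(
        num_predictions_per_location * encoding_size, [1, 1],
        padding='SAME', name='ExampleShapePredictor')
  def _predict(self, features):
    """Returns a [batch_size, num_anchors, encoding_size] float tensor."""
    predictions = self._conv(features)
    batch_size = tf.shape(features)[0]
    # Collapse the spatial grid and predictions-per-location into a single
    # anchors dimension, as the other heads in this package do.
    return tf.reshape(predictions, [batch_size, -1, self._encoding_size])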
| 2,645 | 31.268293 | 80 | py |
models | models-master/research/object_detection/predictors/heads/keras_box_head.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Box Head.
Contains Box prediction head classes for different meta architectures.
All the box prediction heads have a _predict function that receives the
`features` as the first argument and returns `box_encodings`.
"""
import tensorflow.compat.v1 as tf
from object_detection.predictors.heads import head
class ConvolutionalBoxHead(head.KerasHead):
"""Convolutional box prediction head."""
def __init__(self,
is_training,
box_code_size,
kernel_size,
num_predictions_per_location,
conv_hyperparams,
freeze_batchnorm,
use_depthwise=False,
box_encodings_clip_range=None,
name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
box_code_size: Size of encoding for each box.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
num_predictions_per_location: Number of box predictions to be made per
spatial location. Int specifying number of boxes per location.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping box_encodings.
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
Raises:
ValueError: if min_depth > max_depth.
ValueError: if use_depthwise is True and kernel_size is 1.
"""
if use_depthwise and (kernel_size == 1):
raise ValueError('Should not use 1x1 kernel when using depthwise conv')
super(ConvolutionalBoxHead, self).__init__(name=name)
self._is_training = is_training
self._box_code_size = box_code_size
self._kernel_size = kernel_size
self._num_predictions_per_location = num_predictions_per_location
self._use_depthwise = use_depthwise
self._box_encodings_clip_range = box_encodings_clip_range
self._box_encoder_layers = []
if self._use_depthwise:
self._box_encoder_layers.append(
tf.keras.layers.DepthwiseConv2D(
[self._kernel_size, self._kernel_size],
padding='SAME',
depth_multiplier=1,
strides=1,
dilation_rate=1,
name='BoxEncodingPredictor_depthwise',
**conv_hyperparams.params()))
self._box_encoder_layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name='BoxEncodingPredictor_depthwise_batchnorm'))
self._box_encoder_layers.append(
conv_hyperparams.build_activation_layer(
name='BoxEncodingPredictor_depthwise_activation'))
self._box_encoder_layers.append(
tf.keras.layers.Conv2D(
num_predictions_per_location * self._box_code_size, [1, 1],
name='BoxEncodingPredictor',
**conv_hyperparams.params(use_bias=True)))
else:
self._box_encoder_layers.append(
tf.keras.layers.Conv2D(
num_predictions_per_location * self._box_code_size,
[self._kernel_size, self._kernel_size],
padding='SAME',
name='BoxEncodingPredictor',
**conv_hyperparams.params(use_bias=True)))
def _predict(self, features):
"""Predicts boxes.
Args:
features: A float tensor of shape [batch_size, height, width, channels]
containing image features.
Returns:
box_encodings: A float tensor of shape
[batch_size, num_anchors, q, code_size] representing the location of
the objects, where q is 1 or the number of classes.
"""
box_encodings = features
for layer in self._box_encoder_layers:
box_encodings = layer(box_encodings)
batch_size = features.get_shape().as_list()[0]
if batch_size is None:
batch_size = tf.shape(features)[0]
# Clipping the box encodings to make the inference graph TPU friendly.
if self._box_encodings_clip_range is not None:
box_encodings = tf.clip_by_value(
box_encodings, self._box_encodings_clip_range.min,
self._box_encodings_clip_range.max)
box_encodings = tf.reshape(box_encodings,
[batch_size, -1, 1, self._box_code_size])
return box_encodings
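# Illustrative usage sketch (not part of the library API). It mirrors the
# unit tests: hyperparams are parsed from a text proto and the head is called
# on a feature map. The proto text and all shapes are example values.
def _example_convolutional_box_head_usage():
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  from object_detection.protos import hyperparams_pb2
  proto = hyperparams_pb2.Hyperparams()
  text_format.Merge("""
    activation: NONE
    regularizer { l2_regularizer { } }
    initializer { truncated_normal_initializer { } }
  """, proto)
  conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(proto)
  head = ConvolutionalBoxHead(
      is_training=False,
      box_code_size=4,
      kernel_size=3,
      conv_hyperparams=conv_hyperparams,
      freeze_batchnorm=False,
      num_predictions_per_location=1)
  image_feature = tf.random_uniform([2, 17, 19, 64], dtype=tf.float32)
  # 17 * 19 locations with one prediction each -> [2, 323, 1, 4].
  return head(image_feature)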
class MaskRCNNBoxHead(head.KerasHead):
"""Box prediction head.
This is a piece of Mask RCNN which is responsible for predicting
just the box encodings.
Please refer to Mask RCNN paper:
https://arxiv.org/abs/1703.06870
"""
def __init__(self,
is_training,
num_classes,
fc_hyperparams,
freeze_batchnorm,
use_dropout,
dropout_keep_prob,
box_code_size,
share_box_across_classes=False,
name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for fully connected dense ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
share_box_across_classes: Whether to share boxes across classes rather
than use a different box for each class.
name: A string name scope to assign to the box head. If `None`, Keras
will auto-generate one from the class name.
"""
super(MaskRCNNBoxHead, self).__init__(name=name)
self._is_training = is_training
self._num_classes = num_classes
self._fc_hyperparams = fc_hyperparams
self._freeze_batchnorm = freeze_batchnorm
self._use_dropout = use_dropout
self._dropout_keep_prob = dropout_keep_prob
self._box_code_size = box_code_size
self._share_box_across_classes = share_box_across_classes
self._box_encoder_layers = [tf.keras.layers.Flatten()]
if self._use_dropout:
self._box_encoder_layers.append(
tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob))
self._number_of_boxes = 1
if not self._share_box_across_classes:
self._number_of_boxes = self._num_classes
self._box_encoder_layers.append(
tf.keras.layers.Dense(self._number_of_boxes * self._box_code_size,
name='BoxEncodingPredictor_dense'))
self._box_encoder_layers.append(
fc_hyperparams.build_batch_norm(training=(is_training and
not freeze_batchnorm),
name='BoxEncodingPredictor_batchnorm'))
def _predict(self, features):
"""Predicts box encodings.
Args:
features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
Returns:
box_encodings: A float tensor of shape
[batch_size, 1, num_classes, code_size] representing the location of the
objects.
"""
spatial_averaged_roi_pooled_features = tf.reduce_mean(
features, [1, 2], keep_dims=True, name='AvgPool')
net = spatial_averaged_roi_pooled_features
for layer in self._box_encoder_layers:
net = layer(net)
box_encodings = tf.reshape(net,
[-1, 1,
self._number_of_boxes,
self._box_code_size])
return box_encodings
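# Illustrative sketch (not part of the library API): unlike the convolutional
# heads, this head runs on ROI-pooled feature crops, one per proposal.
# `fc_hyperparams` is assumed to be a KerasLayerHyperparams built from a
# Hyperparams proto with op set to FC, as the unit tests do; shapes are
# example values.
def _example_mask_rcnn_box_head_usage(fc_hyperparams):
  head = MaskRCNNBoxHead(
      is_training=False,
      num_classes=20,
      fc_hyperparams=fc_hyperparams,
      freeze_batchnorm=False,
      use_dropout=False,
      dropout_keep_prob=1.0,
      box_code_size=4)
  # One 7x7 crop per proposal: [total_num_proposals, 7, 7, depth].
  roi_pooled_features = tf.random_uniform([64, 7, 7, 256], dtype=tf.float32)
  # Per-proposal, per-class box refinements: [64, 1, 20, 4].
  return head(roi_pooled_features)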
# TODO(b/128922690): Unify the implementations of ConvolutionalBoxHead
# and WeightSharedConvolutionalBoxHead
class WeightSharedConvolutionalBoxHead(head.KerasHead):
"""Weight shared convolutional box prediction head based on Keras.
This head allows sharing the same set of parameters (weights) when called more
  than once on different feature maps.
"""
def __init__(self,
box_code_size,
num_predictions_per_location,
conv_hyperparams,
kernel_size=3,
use_depthwise=False,
apply_conv_hyperparams_to_heads=False,
box_encodings_clip_range=None,
return_flat_predictions=True,
name=None):
"""Constructor.
Args:
box_code_size: Size of encoding for each box.
num_predictions_per_location: Number of box predictions to be made per
spatial location. Int specifying number of boxes per location.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
kernel_size: Size of final convolution kernel.
use_depthwise: Whether to use depthwise convolutions for prediction steps.
Default is False.
apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to
        depthwise separable convolution layers in the box and class heads. By
default, the conv_hyperparams are only applied to layers in the
predictor tower when using depthwise separable convolutions.
box_encodings_clip_range: Min and max values for clipping box_encodings.
return_flat_predictions: If true, returns flattened prediction tensor
of shape [batch, height * width * num_predictions_per_location,
        box_code_size]. Otherwise returns the prediction tensor before
        reshaping, whose shape is [batch, height, width,
        num_predictions_per_location * box_code_size].
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
Raises:
ValueError: if use_depthwise is True and kernel_size is 1.
"""
if use_depthwise and (kernel_size == 1):
raise ValueError('Should not use 1x1 kernel when using depthwise conv')
super(WeightSharedConvolutionalBoxHead, self).__init__(name=name)
self._box_code_size = box_code_size
self._kernel_size = kernel_size
self._num_predictions_per_location = num_predictions_per_location
self._use_depthwise = use_depthwise
self._apply_conv_hyperparams_to_heads = apply_conv_hyperparams_to_heads
self._box_encodings_clip_range = box_encodings_clip_range
self._return_flat_predictions = return_flat_predictions
self._box_encoder_layers = []
if self._use_depthwise:
kwargs = conv_hyperparams.params(use_bias=True)
if self._apply_conv_hyperparams_to_heads:
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['pointwise_initializer'] = kwargs['kernel_initializer']
self._box_encoder_layers.append(
tf.keras.layers.SeparableConv2D(
num_predictions_per_location * self._box_code_size,
[self._kernel_size, self._kernel_size],
padding='SAME',
name='BoxPredictor',
**kwargs))
else:
self._box_encoder_layers.append(
tf.keras.layers.Conv2D(
num_predictions_per_location * self._box_code_size,
[self._kernel_size, self._kernel_size],
padding='SAME',
name='BoxPredictor',
**conv_hyperparams.params(use_bias=True)))
def _predict(self, features):
"""Predicts boxes.
Args:
features: A float tensor of shape [batch_size, height, width, channels]
containing image features.
Returns:
box_encodings: A float tensor of shape
[batch_size, num_anchors, q, code_size] representing the location of
the objects, where q is 1 or the number of classes.
"""
box_encodings = features
for layer in self._box_encoder_layers:
box_encodings = layer(box_encodings)
batch_size = features.get_shape().as_list()[0]
if batch_size is None:
batch_size = tf.shape(features)[0]
# Clipping the box encodings to make the inference graph TPU friendly.
if self._box_encodings_clip_range is not None:
box_encodings = tf.clip_by_value(
box_encodings, self._box_encodings_clip_range.min,
self._box_encodings_clip_range.max)
if self._return_flat_predictions:
box_encodings = tf.reshape(box_encodings,
[batch_size, -1, self._box_code_size])
return box_encodings
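# Illustrative sketch (not part of the library API): the point of the
# weight-shared head is that one instance can be applied to several feature
# maps (e.g. FPN levels) while reusing a single convolution kernel and bias.
# `conv_hyperparams` is assumed to be a KerasLayerHyperparams built as in the
# sketch above; shapes are example values.
def _example_weight_shared_box_head_usage(conv_hyperparams):
  head = WeightSharedConvolutionalBoxHead(
      box_code_size=4,
      num_predictions_per_location=1,
      conv_hyperparams=conv_hyperparams)
  # The same head object is called on two feature map resolutions.
  p3 = tf.random_uniform([2, 32, 32, 64], dtype=tf.float32)
  p4 = tf.random_uniform([2, 16, 16, 64], dtype=tf.float32)
  encodings_p3 = head(p3)  # [2, 1024, 4]
  encodings_p4 = head(p4)  # [2, 256, 4]
  # Only one kernel and one bias variable exist despite the two calls.
  assert len(head.variables) == 2
  return encodings_p3, encodings_p4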
| 14,459 | 40.791908 | 80 | py |
models | models-master/research/object_detection/predictors/heads/keras_class_head.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class Head.
Contains Class prediction head classes for different meta architectures.
All the class prediction heads have a predict function that receives the
`features` as the first argument and returns class predictions with background.
"""
import tensorflow.compat.v1 as tf
from object_detection.predictors.heads import head
from object_detection.utils import shape_utils
class ConvolutionalClassHead(head.KerasHead):
"""Convolutional class prediction head."""
def __init__(self,
is_training,
num_class_slots,
use_dropout,
dropout_keep_prob,
kernel_size,
num_predictions_per_location,
conv_hyperparams,
freeze_batchnorm,
class_prediction_bias_init=0.0,
use_depthwise=False,
name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_class_slots: number of class slots. Note that num_class_slots may or
may not include an implicit background category.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
num_predictions_per_location: Number of box predictions to be made per
spatial location. Int specifying number of boxes per location.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
Raises:
ValueError: if min_depth > max_depth.
ValueError: if use_depthwise is True and kernel_size is 1.
"""
if use_depthwise and (kernel_size == 1):
raise ValueError('Should not use 1x1 kernel when using depthwise conv')
super(ConvolutionalClassHead, self).__init__(name=name)
self._is_training = is_training
self._use_dropout = use_dropout
self._dropout_keep_prob = dropout_keep_prob
self._kernel_size = kernel_size
self._class_prediction_bias_init = class_prediction_bias_init
self._use_depthwise = use_depthwise
self._num_class_slots = num_class_slots
self._class_predictor_layers = []
if self._use_dropout:
self._class_predictor_layers.append(
# The Dropout layer's `training` parameter for the call method must
# be set implicitly by the Keras set_learning_phase. The object
# detection training code takes care of this.
tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob))
if self._use_depthwise:
self._class_predictor_layers.append(
tf.keras.layers.DepthwiseConv2D(
[self._kernel_size, self._kernel_size],
padding='SAME',
depth_multiplier=1,
strides=1,
dilation_rate=1,
name='ClassPredictor_depthwise',
**conv_hyperparams.params()))
self._class_predictor_layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name='ClassPredictor_depthwise_batchnorm'))
self._class_predictor_layers.append(
conv_hyperparams.build_activation_layer(
name='ClassPredictor_depthwise_activation'))
self._class_predictor_layers.append(
tf.keras.layers.Conv2D(
num_predictions_per_location * self._num_class_slots, [1, 1],
name='ClassPredictor',
**conv_hyperparams.params(use_bias=True)))
else:
self._class_predictor_layers.append(
tf.keras.layers.Conv2D(
num_predictions_per_location * self._num_class_slots,
[self._kernel_size, self._kernel_size],
padding='SAME',
name='ClassPredictor',
bias_initializer=tf.constant_initializer(
self._class_prediction_bias_init),
**conv_hyperparams.params(use_bias=True)))
def _predict(self, features):
"""Predicts boxes.
Args:
features: A float tensor of shape [batch_size, height, width, channels]
containing image features.
Returns:
class_predictions_with_background: A float tensor of shape
[batch_size, num_anchors, num_class_slots] representing the class
predictions for the proposals.
"""
class_predictions_with_background = features
for layer in self._class_predictor_layers:
class_predictions_with_background = layer(
class_predictions_with_background)
batch_size = features.get_shape().as_list()[0]
if batch_size is None:
batch_size = tf.shape(features)[0]
class_predictions_with_background = tf.reshape(
class_predictions_with_background,
[batch_size, -1, self._num_class_slots])
return class_predictions_with_background
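# Illustrative usage sketch (not part of the library API), mirroring the unit
# tests. `num_class_slots` may include an implicit background slot, so a
# 20-class problem with background uses 21 slots here. The -4.6 bias init
# (roughly log(0.01 / 0.99)) is only an example of a focal-loss style
# initialization, not a recommended default; all shapes are example values.
def _example_convolutional_class_head_usage():
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  from object_detection.protos import hyperparams_pb2
  proto = hyperparams_pb2.Hyperparams()
  text_format.Merge("""
    activation: NONE
    regularizer { l2_regularizer { } }
    initializer { truncated_normal_initializer { } }
  """, proto)
  conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(proto)
  head = ConvolutionalClassHead(
      is_training=False,
      num_class_slots=21,
      use_dropout=False,
      dropout_keep_prob=1.0,
      kernel_size=3,
      num_predictions_per_location=1,
      conv_hyperparams=conv_hyperparams,
      freeze_batchnorm=False,
      class_prediction_bias_init=-4.6)
  image_feature = tf.random_uniform([2, 17, 19, 64], dtype=tf.float32)
  # 17 * 19 locations with one prediction each -> [2, 323, 21] class logits.
  return head(image_feature)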
class MaskRCNNClassHead(head.KerasHead):
"""Mask RCNN class prediction head.
This is a piece of Mask RCNN which is responsible for predicting
just the class scores of boxes.
Please refer to Mask RCNN paper:
https://arxiv.org/abs/1703.06870
"""
def __init__(self,
is_training,
num_class_slots,
fc_hyperparams,
freeze_batchnorm,
use_dropout,
dropout_keep_prob,
name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_class_slots: number of class slots. Note that num_class_slots may or
may not include an implicit background category.
fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for fully connected dense ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
name: A string name scope to assign to the class head. If `None`, Keras
will auto-generate one from the class name.
"""
super(MaskRCNNClassHead, self).__init__(name=name)
self._is_training = is_training
self._freeze_batchnorm = freeze_batchnorm
self._num_class_slots = num_class_slots
self._fc_hyperparams = fc_hyperparams
self._use_dropout = use_dropout
self._dropout_keep_prob = dropout_keep_prob
self._class_predictor_layers = [tf.keras.layers.Flatten()]
if self._use_dropout:
self._class_predictor_layers.append(
tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob))
self._class_predictor_layers.append(
tf.keras.layers.Dense(self._num_class_slots,
name='ClassPredictor_dense'))
self._class_predictor_layers.append(
fc_hyperparams.build_batch_norm(training=(is_training and
not freeze_batchnorm),
name='ClassPredictor_batchnorm'))
def _predict(self, features):
"""Predicts the class scores for boxes.
Args:
features: A float tensor of shape [batch_size, height, width, channels]
containing features for a batch of images.
Returns:
class_predictions_with_background: A float tensor of shape
[batch_size, 1, num_class_slots] representing the class predictions for
the proposals.
"""
spatial_averaged_roi_pooled_features = tf.reduce_mean(
features, [1, 2], keep_dims=True, name='AvgPool')
net = spatial_averaged_roi_pooled_features
for layer in self._class_predictor_layers:
net = layer(net)
class_predictions_with_background = tf.reshape(
net,
[-1, 1, self._num_class_slots])
return class_predictions_with_background
class WeightSharedConvolutionalClassHead(head.KerasHead):
"""Weight shared convolutional class prediction head.
This head allows sharing the same set of parameters (weights) when called more
  than once on different feature maps.
"""
def __init__(self,
num_class_slots,
num_predictions_per_location,
conv_hyperparams,
kernel_size=3,
class_prediction_bias_init=0.0,
use_dropout=False,
dropout_keep_prob=0.8,
use_depthwise=False,
apply_conv_hyperparams_to_heads=False,
score_converter_fn=tf.identity,
return_flat_predictions=True,
name=None):
"""Constructor.
Args:
num_class_slots: number of class slots. Note that num_class_slots may or
may not include an implicit background category.
num_predictions_per_location: Number of box predictions to be made per
spatial location. Int specifying number of boxes per location.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
kernel_size: Size of final convolution kernel.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_dropout: Whether to apply dropout to class prediction head.
      dropout_keep_prob: Probability of keeping activations.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to
        depthwise separable convolution layers in the box and class heads. By
default, the conv_hyperparams are only applied to layers in the
predictor tower when using depthwise separable convolutions.
score_converter_fn: Callable elementwise nonlinearity (that takes tensors
as inputs and returns tensors).
return_flat_predictions: If true, returns flattened prediction tensor
of shape [batch, height * width * num_predictions_per_location,
        num_class_slots]. Otherwise returns the prediction tensor before
        reshaping, whose shape is [batch, height, width,
        num_predictions_per_location * num_class_slots].
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
Raises:
ValueError: if use_depthwise is True and kernel_size is 1.
"""
if use_depthwise and (kernel_size == 1):
raise ValueError('Should not use 1x1 kernel when using depthwise conv')
super(WeightSharedConvolutionalClassHead, self).__init__(name=name)
self._num_class_slots = num_class_slots
self._num_predictions_per_location = num_predictions_per_location
self._kernel_size = kernel_size
self._class_prediction_bias_init = class_prediction_bias_init
self._use_dropout = use_dropout
self._dropout_keep_prob = dropout_keep_prob
self._use_depthwise = use_depthwise
self._apply_conv_hyperparams_to_heads = apply_conv_hyperparams_to_heads
self._score_converter_fn = score_converter_fn
self._return_flat_predictions = return_flat_predictions
self._class_predictor_layers = []
if self._use_dropout:
self._class_predictor_layers.append(
tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob))
if self._use_depthwise:
kwargs = conv_hyperparams.params(use_bias=True)
if self._apply_conv_hyperparams_to_heads:
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['pointwise_initializer'] = kwargs['kernel_initializer']
self._class_predictor_layers.append(
tf.keras.layers.SeparableConv2D(
num_predictions_per_location * self._num_class_slots,
[self._kernel_size, self._kernel_size],
padding='SAME',
depth_multiplier=1,
strides=1,
name='ClassPredictor',
bias_initializer=tf.constant_initializer(
self._class_prediction_bias_init),
**kwargs))
else:
self._class_predictor_layers.append(
tf.keras.layers.Conv2D(
num_predictions_per_location * self._num_class_slots,
[self._kernel_size, self._kernel_size],
padding='SAME',
name='ClassPredictor',
bias_initializer=tf.constant_initializer(
self._class_prediction_bias_init),
**conv_hyperparams.params(use_bias=True)))
def _predict(self, features):
"""Predicts boxes.
Args:
features: A float tensor of shape [batch_size, height, width, channels]
containing image features.
Returns:
class_predictions_with_background: A float tensor of shape
[batch_size, num_anchors, num_class_slots] representing the class
predictions for the proposals.
"""
class_predictions_with_background = features
for layer in self._class_predictor_layers:
class_predictions_with_background = layer(
class_predictions_with_background)
batch_size, height, width = shape_utils.combined_static_and_dynamic_shape(
features)[0:3]
class_predictions_with_background = tf.reshape(
class_predictions_with_background, [
batch_size, height, width, self._num_predictions_per_location,
self._num_class_slots
])
class_predictions_with_background = self._score_converter_fn(
class_predictions_with_background)
if self._return_flat_predictions:
class_predictions_with_background = tf.reshape(
class_predictions_with_background,
[batch_size, -1, self._num_class_slots])
else:
class_predictions_with_background = tf.reshape(
class_predictions_with_background, [
batch_size, height, width,
self._num_predictions_per_location * self._num_class_slots
])
return class_predictions_with_background
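# Illustrative sketch (not part of the library API): `score_converter_fn`
# lets the head emit converted scores (here sigmoid probabilities) instead of
# raw logits. `conv_hyperparams` is assumed to be a KerasLayerHyperparams
# built as in the sketch above; shapes are example values.
def _example_weight_shared_class_head_usage(conv_hyperparams):
  head = WeightSharedConvolutionalClassHead(
      num_class_slots=21,
      num_predictions_per_location=2,
      conv_hyperparams=conv_hyperparams,
      score_converter_fn=tf.nn.sigmoid)
  image_feature = tf.random_uniform([2, 8, 8, 64], dtype=tf.float32)
  # 8 * 8 locations with two predictions each -> [2, 128, 21] scores.
  return head(image_feature)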
| 16,105 | 41.835106 | 80 | py |
models | models-master/research/object_detection/meta_architectures/rfcn_meta_arch.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""R-FCN meta-architecture definition.
R-FCN: Dai, Jifeng, et al. "R-FCN: Object Detection via Region-based
Fully Convolutional Networks." arXiv preprint arXiv:1605.06409 (2016).
The R-FCN meta architecture is similar to Faster R-CNN and only differs in the
second stage. Hence this class inherits FasterRCNNMetaArch and overrides only
the `_predict_second_stage` method.
Similar to Faster R-CNN we allow for two modes: number_of_stages=1 and
number_of_stages=2. In the former setting, all of the user facing methods
(e.g., predict, postprocess, loss) can be used as if the model consisted
only of the RPN, returning class agnostic proposals (these can be thought of as
approximate detections with no associated class information). In the latter
setting, proposals are computed, then passed through a second stage
"box classifier" to yield (multi-class) detections.
Implementations of R-FCN models must define a new FasterRCNNFeatureExtractor and
override three methods: `preprocess`, `_extract_proposal_features` (the first
stage of the model), and `_extract_box_classifier_features` (the second stage of
the model). Optionally, the `restore_fn` method can be overridden. See tests
for an example.
See notes in the documentation of Faster R-CNN meta-architecture as they all
apply here.
"""
import tensorflow.compat.v1 as tf
from object_detection.core import box_predictor
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.utils import ops
class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
"""R-FCN Meta-architecture definition."""
def __init__(self,
is_training,
num_classes,
image_resizer_fn,
feature_extractor,
number_of_stages,
first_stage_anchor_generator,
first_stage_target_assigner,
first_stage_atrous_rate,
first_stage_box_predictor_arg_scope_fn,
first_stage_box_predictor_kernel_size,
first_stage_box_predictor_depth,
first_stage_minibatch_size,
first_stage_sampler,
first_stage_non_max_suppression_fn,
first_stage_max_proposals,
first_stage_localization_loss_weight,
first_stage_objectness_loss_weight,
crop_and_resize_fn,
second_stage_target_assigner,
second_stage_rfcn_box_predictor,
second_stage_batch_size,
second_stage_sampler,
second_stage_non_max_suppression_fn,
second_stage_score_conversion_fn,
second_stage_localization_loss_weight,
second_stage_classification_loss_weight,
second_stage_classification_loss,
hard_example_miner,
parallel_iterations=16,
add_summaries=True,
clip_anchors_to_image=False,
use_static_shapes=False,
resize_masks=False,
freeze_batchnorm=False,
return_raw_detections_during_predict=False,
output_final_box_features=False,
output_final_box_rpn_features=False):
"""RFCNMetaArch Constructor.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
num_classes: Number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
image_resizer_fn: A callable for image resizing. This callable always
takes a rank-3 image tensor (corresponding to a single image) and
returns a rank-3 image tensor, possibly with new spatial dimensions.
See builders/image_resizer_builder.py.
feature_extractor: A FasterRCNNFeatureExtractor object.
number_of_stages: Valid values are {1, 2}. If 1 will only construct the
Region Proposal Network (RPN) part of the model.
first_stage_anchor_generator: An anchor_generator.AnchorGenerator object
(note that currently we only support
grid_anchor_generator.GridAnchorGenerator objects)
first_stage_target_assigner: Target assigner to use for first stage of
R-FCN (RPN).
first_stage_atrous_rate: A single integer indicating the atrous rate for
the single convolution op which is applied to the `rpn_features_to_crop`
tensor to obtain a tensor to be used for box prediction. Some feature
extractors optionally allow for producing feature maps computed at
denser resolutions. The atrous rate is used to compensate for the
denser feature maps by using an effectively larger receptive field.
(This should typically be set to 1).
first_stage_box_predictor_arg_scope_fn: Either a
Keras layer hyperparams object or a function to construct tf-slim
arg_scope for conv2d, separable_conv2d and fully_connected ops. Used
for the RPN box predictor. If it is a keras hyperparams object the
RPN box predictor will be a Keras model. If it is a function to
construct an arg scope it will be a tf-slim box predictor.
first_stage_box_predictor_kernel_size: Kernel size to use for the
convolution op just prior to RPN box predictions.
first_stage_box_predictor_depth: Output depth for the convolution op
just prior to RPN box predictions.
first_stage_minibatch_size: The "batch size" to use for computing the
objectness and location loss of the region proposal network. This
"batch size" refers to the number of anchors selected as contributing
to the loss function for any given image within the image batch and is
only called "batch_size" due to terminology from the Faster R-CNN paper.
first_stage_sampler: The sampler for the boxes used to calculate the RPN
loss after the first stage.
first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
callable that takes `boxes`, `scores` and optional `clip_window`(with
all other inputs already set) and returns a dictionary containing
tensors with keys: `detection_boxes`, `detection_scores`,
`detection_classes`, `num_detections`. This is used to perform non max
suppression on the boxes predicted by the Region Proposal Network
(RPN).
See `post_processing.batch_multiclass_non_max_suppression` for the type
and shape of these tensors.
first_stage_max_proposals: Maximum number of boxes to retain after
performing Non-Max Suppression (NMS) on the boxes predicted by the
Region Proposal Network (RPN).
first_stage_localization_loss_weight: A float
first_stage_objectness_loss_weight: A float
crop_and_resize_fn: A differentiable resampler to use for cropping RPN
proposal features.
second_stage_target_assigner: Target assigner to use for second stage of
R-FCN. If the model is configured with multiple prediction heads, this
target assigner is used to generate targets for all heads (with the
correct `unmatched_class_label`).
second_stage_rfcn_box_predictor: RFCN box predictor to use for
second stage.
second_stage_batch_size: The batch size used for computing the
classification and refined location loss of the box classifier. This
"batch size" refers to the number of proposals selected as contributing
to the loss function for any given image within the image batch and is
only called "batch_size" due to terminology from the Faster R-CNN paper.
second_stage_sampler: The sampler for the boxes used for second stage
box classifier.
second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
callable that takes `boxes`, `scores`, optional `clip_window` and
optional (kwarg) `mask` inputs (with all other inputs already set)
and returns a dictionary containing tensors with keys:
`detection_boxes`, `detection_scores`, `detection_classes`,
`num_detections`, and (optionally) `detection_masks`. See
`post_processing.batch_multiclass_non_max_suppression` for the type and
shape of these tensors.
second_stage_score_conversion_fn: Callable elementwise nonlinearity
(that takes tensors as inputs and returns tensors). This is usually
used to convert logits to probabilities.
second_stage_localization_loss_weight: A float
second_stage_classification_loss_weight: A float
second_stage_classification_loss: A string indicating which loss function
to use, supports 'softmax' and 'sigmoid'.
hard_example_miner: A losses.HardExampleMiner object (can be None).
parallel_iterations: (Optional) The number of iterations allowed to run
in parallel for calls to tf.map_fn.
add_summaries: boolean (default: True) controlling whether summary ops
should be added to tensorflow graph.
      clip_anchors_to_image: The anchors generated are clipped to the
window size without filtering the nonoverlapping anchors. This generates
a static number of anchors. This argument is unused.
use_static_shapes: If True, uses implementation of ops with static shape
guarantees.
      resize_masks: Indicates whether the masks present in the groundtruth
        should be resized in the model with `image_resizer_fn`.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
return_raw_detections_during_predict: Whether to return raw detection
boxes in the predict() method. These are decoded boxes that have not
been through postprocessing (i.e. NMS). Default False.
output_final_box_features: Whether to output final box features. If true,
it crops the feature map based on the final box prediction and returns
it in the dict as detection_features.
output_final_box_rpn_features: Whether to output rpn box features. If
true, it crops the rpn feature map based on the final box prediction and
returns it in the dict as detection_features.
Raises:
ValueError: If `second_stage_batch_size` > `first_stage_max_proposals`
ValueError: If first_stage_anchor_generator is not of type
grid_anchor_generator.GridAnchorGenerator.
"""
    # TODO(rathodv): add_summaries and crop_and_resize_fn are currently
    # unused. Respect that directive in the future.
super(RFCNMetaArch, self).__init__(
is_training,
num_classes,
image_resizer_fn,
feature_extractor,
number_of_stages,
first_stage_anchor_generator,
first_stage_target_assigner,
first_stage_atrous_rate,
first_stage_box_predictor_arg_scope_fn,
first_stage_box_predictor_kernel_size,
first_stage_box_predictor_depth,
first_stage_minibatch_size,
first_stage_sampler,
first_stage_non_max_suppression_fn,
first_stage_max_proposals,
first_stage_localization_loss_weight,
first_stage_objectness_loss_weight,
crop_and_resize_fn,
None, # initial_crop_size is not used in R-FCN
        None,  # maxpool_kernel_size is not used in R-FCN
        None,  # maxpool_stride is not used in R-FCN
second_stage_target_assigner,
None, # fully_connected_box_predictor is not used in R-FCN.
second_stage_batch_size,
second_stage_sampler,
second_stage_non_max_suppression_fn,
second_stage_score_conversion_fn,
second_stage_localization_loss_weight,
second_stage_classification_loss_weight,
second_stage_classification_loss,
1.0, # second stage mask prediction loss weight isn't used in R-FCN.
hard_example_miner,
parallel_iterations,
add_summaries,
clip_anchors_to_image,
use_static_shapes,
resize_masks,
freeze_batchnorm=freeze_batchnorm,
return_raw_detections_during_predict=(
return_raw_detections_during_predict),
output_final_box_features=output_final_box_features,
output_final_box_rpn_features=output_final_box_rpn_features)
self._rfcn_box_predictor = second_stage_rfcn_box_predictor
def _predict_second_stage(self, rpn_box_encodings,
rpn_objectness_predictions_with_background,
rpn_features,
anchors,
image_shape,
true_image_shapes):
"""Predicts the output tensors from 2nd stage of R-FCN.
Args:
rpn_box_encodings: 3-D float tensor of shape
[batch_size, num_valid_anchors, self._box_coder.code_size] containing
predicted boxes.
rpn_objectness_predictions_with_background: 3-D float tensor of shape
[batch_size, num_valid_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
      rpn_features: A list containing a single 4-D float32 tensor with shape
        [batch_size, height, width, depth] representing image features from
        the RPN.
anchors: 2-D float tensor of shape
[num_anchors, self._box_coder.code_size].
      image_shape: A 1-D int32 tensor of size [4] containing the image shape.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, 4] representing predicted
(final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals
2) class_predictions_with_background: a 2-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) num_proposals: An int32 tensor of shape [batch_size] representing the
number of proposals generated by the RPN. `num_proposals` allows us
to keep track of which entries are to be treated as zero paddings and
which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
4) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes (in absolute coordinates).
5) proposal_boxes_normalized: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing decoded proposal
bounding boxes (in normalized coordinates). Can be used to override
the boxes proposed by the RPN, thus enabling one to extract box
classification and prediction for externally selected areas of the
image.
6) box_classifier_features: a 4-D float32 tensor, of shape
[batch_size, feature_map_height, feature_map_width, depth],
representing the box classifier features.
"""
image_shape_2d = tf.tile(tf.expand_dims(image_shape[1:], 0),
[image_shape[0], 1])
(proposal_boxes_normalized, _, _, num_proposals, _,
_) = self._postprocess_rpn(rpn_box_encodings,
rpn_objectness_predictions_with_background,
anchors, image_shape_2d, true_image_shapes)
rpn_features = rpn_features[0]
box_classifier_features = (
self._extract_box_classifier_features(rpn_features))
if self._rfcn_box_predictor.is_keras_model:
box_predictions = self._rfcn_box_predictor(
[box_classifier_features],
proposal_boxes=proposal_boxes_normalized)
else:
box_predictions = self._rfcn_box_predictor.predict(
[box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
proposal_boxes=proposal_boxes_normalized)
refined_box_encodings = tf.squeeze(
tf.concat(box_predictions[box_predictor.BOX_ENCODINGS], axis=1), axis=1)
class_predictions_with_background = tf.squeeze(
tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1),
axis=1)
absolute_proposal_boxes = ops.normalized_to_image_coordinates(
proposal_boxes_normalized, image_shape,
parallel_iterations=self._parallel_iterations)
prediction_dict = {
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'num_proposals': num_proposals,
'proposal_boxes': absolute_proposal_boxes,
'box_classifier_features': box_classifier_features,
'proposal_boxes_normalized': proposal_boxes_normalized,
'final_anchors': absolute_proposal_boxes
}
if self._return_raw_detections_during_predict:
prediction_dict.update(self._raw_detections_and_feature_map_inds(
refined_box_encodings, absolute_proposal_boxes))
return prediction_dict
def regularization_losses(self):
"""Returns a list of regularization losses for this model.
Returns a list of regularization losses for this model that the estimator
needs to use during training/optimization.
Returns:
A list of regularization loss tensors.
"""
reg_losses = super(RFCNMetaArch, self).regularization_losses()
if self._rfcn_box_predictor.is_keras_model:
reg_losses.extend(self._rfcn_box_predictor.losses)
return reg_losses
def updates(self):
"""Returns a list of update operators for this model.
Returns a list of update operators for this model that must be executed at
each training step. The estimator's train op needs to have a control
dependency on these updates.
Returns:
A list of update operators.
"""
update_ops = super(RFCNMetaArch, self).updates()
if self._rfcn_box_predictor.is_keras_model:
update_ops.extend(
self._rfcn_box_predictor.get_updates_for(None))
update_ops.extend(
self._rfcn_box_predictor.get_updates_for(
self._rfcn_box_predictor.inputs))
return update_ops
| 19,805 | 49.141772 | 80 | py |
models | models-master/research/object_detection/meta_architectures/context_rcnn_meta_arch_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.meta_architectures.context_meta_arch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import unittest
from unittest import mock # pylint: disable=g-importing-member
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.builders import post_processing_builder
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import losses
from object_detection.core import post_processing
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner
from object_detection.meta_architectures import context_rcnn_meta_arch
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2
from object_detection.protos import post_processing_pb2
from object_detection.utils import spatial_transform_ops as spatial_ops
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
class FakeFasterRCNNFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Fake feature extractor to use in tests."""
def __init__(self):
super(FakeFasterRCNNFeatureExtractor, self).__init__(
is_training=False,
first_stage_features_stride=32,
reuse_weights=None,
weight_decay=0.0)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def _extract_proposal_features(self, preprocessed_inputs, scope):
with tf.variable_scope('mock_model'):
proposal_features = 0 * slim.conv2d(
preprocessed_inputs, num_outputs=3, kernel_size=1, scope='layer1')
return proposal_features, {}
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
with tf.variable_scope('mock_model'):
return 0 * slim.conv2d(
proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer2')
class FakeFasterRCNNKerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Fake feature extractor to use in tests."""
def __init__(self):
super(FakeFasterRCNNKerasFeatureExtractor, self).__init__(
is_training=False, first_stage_features_stride=32, weight_decay=0.0)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def get_proposal_feature_extractor_model(self, name):
class ProposalFeatureExtractor(tf.keras.Model):
"""Dummy proposal feature extraction."""
def __init__(self, name):
super(ProposalFeatureExtractor, self).__init__(name=name)
self.conv = None
def build(self, input_shape):
self.conv = tf.keras.layers.Conv2D(
3, kernel_size=1, padding='SAME', name='layer1')
def call(self, inputs):
return self.conv(inputs)
return ProposalFeatureExtractor(name=name)
def get_box_classifier_feature_extractor_model(self, name):
return tf.keras.Sequential([
tf.keras.layers.Conv2D(
3, kernel_size=1, padding='SAME', name=name + '_layer2')
])
class ContextRCNNMetaArchTest(test_case.TestCase, parameterized.TestCase):
def _get_model(self, box_predictor, **common_kwargs):
return context_rcnn_meta_arch.ContextRCNNMetaArch(
initial_crop_size=3,
maxpool_kernel_size=1,
maxpool_stride=1,
second_stage_mask_rcnn_box_predictor=box_predictor,
attention_bottleneck_dimension=10,
attention_temperature=0.2,
**common_kwargs)
def _build_arg_scope_with_hyperparams(self, hyperparams_text_proto,
is_training):
hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge(hyperparams_text_proto, hyperparams)
return hyperparams_builder.build(hyperparams, is_training=is_training)
def _build_keras_layer_hyperparams(self, hyperparams_text_proto):
hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge(hyperparams_text_proto, hyperparams)
return hyperparams_builder.KerasLayerHyperparams(hyperparams)
def _get_second_stage_box_predictor_text_proto(self,
share_box_across_classes=False
):
share_box_field = 'true' if share_box_across_classes else 'false'
box_predictor_text_proto = """
mask_rcnn_box_predictor {{
fc_hyperparams {{
op: FC
activation: NONE
regularizer {{
l2_regularizer {{
weight: 0.0005
}}
}}
initializer {{
variance_scaling_initializer {{
factor: 1.0
uniform: true
mode: FAN_AVG
}}
}}
}}
share_box_across_classes: {share_box_across_classes}
}}
""".format(share_box_across_classes=share_box_field)
return box_predictor_text_proto
def _get_box_classifier_features_shape(self,
image_size,
batch_size,
max_num_proposals,
initial_crop_size,
maxpool_stride,
num_features):
return (batch_size * max_num_proposals,
initial_crop_size/maxpool_stride,
initial_crop_size/maxpool_stride,
num_features)
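    # Worked example (values here are arbitrary illustrations): batch_size=2,
    # max_num_proposals=8, initial_crop_size=3, maxpool_stride=1 and
    # num_features=3 give (2 * 8, 3 / 1, 3 / 1, 3) = (16, 3, 3, 3).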
def _get_second_stage_box_predictor(self,
num_classes,
is_training,
predict_masks,
masks_are_class_agnostic,
share_box_across_classes=False,
use_keras=False):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(
self._get_second_stage_box_predictor_text_proto(
share_box_across_classes), box_predictor_proto)
if predict_masks:
text_format.Merge(
self._add_mask_to_second_stage_box_predictor_text_proto(
masks_are_class_agnostic), box_predictor_proto)
if use_keras:
return box_predictor_builder.build_keras(
hyperparams_builder.KerasLayerHyperparams,
inplace_batchnorm_update=False,
freeze_batchnorm=False,
box_predictor_config=box_predictor_proto,
num_classes=num_classes,
num_predictions_per_location_list=None,
is_training=is_training)
else:
return box_predictor_builder.build(
hyperparams_builder.build,
box_predictor_proto,
num_classes=num_classes,
is_training=is_training)
def _build_model(self,
is_training,
number_of_stages,
second_stage_batch_size,
first_stage_max_proposals=8,
num_classes=2,
hard_mining=False,
softmax_second_stage_classification_loss=True,
predict_masks=False,
pad_to_max_dimension=None,
masks_are_class_agnostic=False,
use_matmul_crop_and_resize=False,
clip_anchors_to_image=False,
use_matmul_gather_in_matcher=False,
use_static_shapes=False,
calibration_mapping_value=None,
share_box_across_classes=False,
return_raw_detections_during_predict=False):
use_keras = tf_version.is_tf2()
def image_resizer_fn(image, masks=None):
"""Fake image resizer function."""
resized_inputs = []
resized_image = tf.identity(image)
if pad_to_max_dimension is not None:
resized_image = tf.image.pad_to_bounding_box(image, 0, 0,
pad_to_max_dimension,
pad_to_max_dimension)
resized_inputs.append(resized_image)
if masks is not None:
resized_masks = tf.identity(masks)
if pad_to_max_dimension is not None:
resized_masks = tf.image.pad_to_bounding_box(
tf.transpose(masks, [1, 2, 0]), 0, 0, pad_to_max_dimension,
pad_to_max_dimension)
resized_masks = tf.transpose(resized_masks, [2, 0, 1])
resized_inputs.append(resized_masks)
resized_inputs.append(tf.shape(image))
return resized_inputs
# anchors in this test are designed so that a subset of anchors are inside
# the image and a subset of anchors are outside.
first_stage_anchor_scales = (0.001, 0.005, 0.1)
first_stage_anchor_aspect_ratios = (0.5, 1.0, 2.0)
first_stage_anchor_strides = (1, 1)
first_stage_anchor_generator = grid_anchor_generator.GridAnchorGenerator(
first_stage_anchor_scales,
first_stage_anchor_aspect_ratios,
anchor_stride=first_stage_anchor_strides)
first_stage_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN',
'proposal',
use_matmul_gather=use_matmul_gather_in_matcher)
if use_keras:
fake_feature_extractor = FakeFasterRCNNKerasFeatureExtractor()
else:
fake_feature_extractor = FakeFasterRCNNFeatureExtractor()
first_stage_box_predictor_hyperparams_text_proto = """
op: CONV
activation: RELU
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
}
}
"""
if use_keras:
first_stage_box_predictor_arg_scope_fn = (
self._build_keras_layer_hyperparams(
first_stage_box_predictor_hyperparams_text_proto))
else:
first_stage_box_predictor_arg_scope_fn = (
self._build_arg_scope_with_hyperparams(
first_stage_box_predictor_hyperparams_text_proto, is_training))
first_stage_box_predictor_kernel_size = 3
first_stage_atrous_rate = 1
first_stage_box_predictor_depth = 512
first_stage_minibatch_size = 3
first_stage_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=0.5, is_static=use_static_shapes)
first_stage_nms_score_threshold = -1.0
first_stage_nms_iou_threshold = 1.0
first_stage_non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=first_stage_nms_score_threshold,
iou_thresh=first_stage_nms_iou_threshold,
max_size_per_class=first_stage_max_proposals,
max_total_size=first_stage_max_proposals,
use_static_shapes=use_static_shapes)
first_stage_localization_loss_weight = 1.0
first_stage_objectness_loss_weight = 1.0
post_processing_config = post_processing_pb2.PostProcessing()
post_processing_text_proto = """
score_converter: IDENTITY
batch_non_max_suppression {
score_threshold: -20.0
iou_threshold: 1.0
max_detections_per_class: 5
max_total_detections: 5
use_static_shapes: """ + '{}'.format(use_static_shapes) + """
}
"""
if calibration_mapping_value:
calibration_text_proto = """
calibration_config {
function_approximation {
x_y_pairs {
x_y_pair {
x: 0.0
y: %f
}
x_y_pair {
x: 1.0
y: %f
}}}}""" % (calibration_mapping_value, calibration_mapping_value)
post_processing_text_proto = (
post_processing_text_proto + ' ' + calibration_text_proto)
text_format.Merge(post_processing_text_proto, post_processing_config)
second_stage_non_max_suppression_fn, second_stage_score_conversion_fn = (
post_processing_builder.build(post_processing_config))
second_stage_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN',
'detection',
use_matmul_gather=use_matmul_gather_in_matcher)
second_stage_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=1.0, is_static=use_static_shapes)
second_stage_localization_loss_weight = 1.0
second_stage_classification_loss_weight = 1.0
if softmax_second_stage_classification_loss:
second_stage_classification_loss = (
losses.WeightedSoftmaxClassificationLoss())
else:
second_stage_classification_loss = (
losses.WeightedSigmoidClassificationLoss())
hard_example_miner = None
if hard_mining:
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=1,
iou_threshold=0.99,
loss_type='both',
cls_loss_weight=second_stage_classification_loss_weight,
loc_loss_weight=second_stage_localization_loss_weight,
max_negatives_per_positive=None)
crop_and_resize_fn = (
spatial_ops.multilevel_matmul_crop_and_resize
if use_matmul_crop_and_resize
else spatial_ops.multilevel_native_crop_and_resize)
common_kwargs = {
'is_training':
is_training,
'num_classes':
num_classes,
'image_resizer_fn':
image_resizer_fn,
'feature_extractor':
fake_feature_extractor,
'number_of_stages':
number_of_stages,
'first_stage_anchor_generator':
first_stage_anchor_generator,
'first_stage_target_assigner':
first_stage_target_assigner,
'first_stage_atrous_rate':
first_stage_atrous_rate,
'first_stage_box_predictor_arg_scope_fn':
first_stage_box_predictor_arg_scope_fn,
'first_stage_box_predictor_kernel_size':
first_stage_box_predictor_kernel_size,
'first_stage_box_predictor_depth':
first_stage_box_predictor_depth,
'first_stage_minibatch_size':
first_stage_minibatch_size,
'first_stage_sampler':
first_stage_sampler,
'first_stage_non_max_suppression_fn':
first_stage_non_max_suppression_fn,
'first_stage_max_proposals':
first_stage_max_proposals,
'first_stage_localization_loss_weight':
first_stage_localization_loss_weight,
'first_stage_objectness_loss_weight':
first_stage_objectness_loss_weight,
'second_stage_target_assigner':
second_stage_target_assigner,
'second_stage_batch_size':
second_stage_batch_size,
'second_stage_sampler':
second_stage_sampler,
'second_stage_non_max_suppression_fn':
second_stage_non_max_suppression_fn,
'second_stage_score_conversion_fn':
second_stage_score_conversion_fn,
'second_stage_localization_loss_weight':
second_stage_localization_loss_weight,
'second_stage_classification_loss_weight':
second_stage_classification_loss_weight,
'second_stage_classification_loss':
second_stage_classification_loss,
'hard_example_miner':
hard_example_miner,
'crop_and_resize_fn':
crop_and_resize_fn,
'clip_anchors_to_image':
clip_anchors_to_image,
'use_static_shapes':
use_static_shapes,
'resize_masks':
True,
'return_raw_detections_during_predict':
return_raw_detections_during_predict
}
return self._get_model(
self._get_second_stage_box_predictor(
num_classes=num_classes,
is_training=is_training,
use_keras=use_keras,
predict_masks=predict_masks,
masks_are_class_agnostic=masks_are_class_agnostic,
share_box_across_classes=share_box_across_classes), **common_kwargs)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
@mock.patch.object(context_rcnn_meta_arch, 'context_rcnn_lib')
def test_prediction_mock_tf1(self, mock_context_rcnn_lib_v1):
"""Mocks the context_rcnn_lib_v1 module to test the prediction.
    Using a mock object so that we can ensure _compute_box_context_attention is
    called inside the prediction function.
Args:
mock_context_rcnn_lib_v1: mock module for the context_rcnn_lib_v1.
"""
model = self._build_model(
is_training=False,
number_of_stages=2,
second_stage_batch_size=6,
num_classes=42)
mock_tensor = tf.ones([2, 8, 3, 3, 3], tf.float32)
mock_context_rcnn_lib_v1._compute_box_context_attention.return_value = mock_tensor
inputs_shape = (2, 20, 20, 3)
inputs = tf.cast(
tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32),
dtype=tf.float32)
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
context_features = tf.random_uniform((2, 20, 10),
minval=0,
maxval=255,
dtype=tf.float32)
valid_context_size = tf.random_uniform((2,),
minval=0,
maxval=10,
dtype=tf.int32)
features = {
fields.InputDataFields.context_features: context_features,
fields.InputDataFields.valid_context_size: valid_context_size
}
side_inputs = model.get_side_inputs(features)
_ = model.predict(preprocessed_inputs, true_image_shapes, **side_inputs)
mock_context_rcnn_lib_v1._compute_box_context_attention.assert_called_once()
@parameterized.named_parameters(
{'testcase_name': 'static_shapes', 'static_shapes': True},
{'testcase_name': 'nostatic_shapes', 'static_shapes': False},
)
def test_prediction_end_to_end(self, static_shapes):
"""Runs prediction end to end and test the shape of the results."""
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=False,
number_of_stages=2,
second_stage_batch_size=6,
use_matmul_crop_and_resize=static_shapes,
clip_anchors_to_image=static_shapes,
use_matmul_gather_in_matcher=static_shapes,
use_static_shapes=static_shapes,
num_classes=42)
def graph_fn():
inputs_shape = (2, 20, 20, 3)
inputs = tf.cast(
tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32),
dtype=tf.float32)
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
context_features = tf.random_uniform((2, 20, 10),
minval=0,
maxval=255,
dtype=tf.float32)
valid_context_size = tf.random_uniform((2,),
minval=0,
maxval=10,
dtype=tf.int32)
features = {
fields.InputDataFields.context_features: context_features,
fields.InputDataFields.valid_context_size: valid_context_size
}
side_inputs = model.get_side_inputs(features)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes,
**side_inputs)
return (prediction_dict['rpn_box_predictor_features'],
prediction_dict['rpn_box_encodings'],
prediction_dict['refined_box_encodings'],
prediction_dict['proposal_boxes_normalized'],
prediction_dict['proposal_boxes'])
execute_fn = self.execute if static_shapes else self.execute_cpu
(rpn_box_predictor_features, rpn_box_encodings, refined_box_encodings,
proposal_boxes_normalized, proposal_boxes) = execute_fn(graph_fn, [],
graph=g)
self.assertAllEqual(len(rpn_box_predictor_features), 1)
self.assertAllEqual(rpn_box_predictor_features[0].shape, [2, 20, 20, 512])
self.assertAllEqual(rpn_box_encodings.shape, [2, 3600, 4])
self.assertAllEqual(refined_box_encodings.shape, [16, 42, 4])
self.assertAllEqual(proposal_boxes_normalized.shape, [2, 8, 4])
self.assertAllEqual(proposal_boxes.shape, [2, 8, 4])
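    # Shape arithmetic behind the assertions above: the fake extractor uses
    # 1x1 convolutions with stride 1, so a 20x20 input keeps a 20x20 feature
    # map; 3 scales x 3 aspect ratios give 9 anchors per location, hence
    # 20 * 20 * 9 = 3600 RPN box encodings, and the second stage handles
    # batch_size * first_stage_max_proposals = 2 * 8 = 16 proposals.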
if __name__ == '__main__':
tf.test.main()
| 21,661 | 39.040665 | 86 | py |
models | models-master/research/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the CenterNet Meta architecture code."""
from __future__ import division
import functools
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.builders import post_processing_builder
from object_detection.core import keypoint_ops
from object_detection.core import losses
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as cn_assigner
from object_detection.meta_architectures import center_net_meta_arch as cnma
from object_detection.models import center_net_resnet_feature_extractor
from object_detection.protos import post_processing_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArchPredictionHeadTest(
test_case.TestCase, parameterized.TestCase):
"""Test CenterNet meta architecture prediction head."""
@parameterized.parameters([True, False])
def test_prediction_head(self, use_depthwise):
head = cnma.make_prediction_net(num_out_channels=7,
use_depthwise=use_depthwise)
output = head(np.zeros((4, 128, 128, 8)))
self.assertEqual((4, 128, 128, 7), output.shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArchHelpersTest(test_case.TestCase, parameterized.TestCase):
"""Test for CenterNet meta architecture related functions."""
def test_row_col_channel_indices_from_flattened_indices(self):
"""Tests that the computation of row, col, channel indices is correct."""
r_grid, c_grid, ch_grid = (np.zeros((5, 4, 3), dtype=int),
np.zeros((5, 4, 3), dtype=int),
np.zeros((5, 4, 3), dtype=int))
r_grid[..., 0] = r_grid[..., 1] = r_grid[..., 2] = np.array(
[[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4]]
)
c_grid[..., 0] = c_grid[..., 1] = c_grid[..., 2] = np.array(
[[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3]]
)
for i in range(3):
ch_grid[..., i] = i
indices = np.arange(60)
ri, ci, chi = cnma.row_col_channel_indices_from_flattened_indices(
indices, 4, 3)
np.testing.assert_array_equal(ri, r_grid.flatten())
np.testing.assert_array_equal(ci, c_grid.flatten())
np.testing.assert_array_equal(chi, ch_grid.flatten())
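    # Worked example of the mapping checked above: with num_cols=4 and
    # num_channels=3, a flattened index i decomposes as channel = i % 3,
    # col = (i // 3) % 4 and row = i // 12, e.g. i=13 -> (1, 0, 1).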
def test_row_col_indices_from_flattened_indices(self):
"""Tests that the computation of row, col indices is correct."""
r_grid = np.array([[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3],
[4, 4, 4, 4]])
c_grid = np.array([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3],
[0, 1, 2, 3]])
indices = np.arange(20)
ri, ci, = cnma.row_col_indices_from_flattened_indices(indices, 4)
np.testing.assert_array_equal(ri, r_grid.flatten())
np.testing.assert_array_equal(ci, c_grid.flatten())
def test_flattened_indices_from_row_col_indices(self):
r = np.array(
[[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]]
)
c = np.array(
[[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3]]
)
idx = cnma.flattened_indices_from_row_col_indices(r, c, 4)
np.testing.assert_array_equal(np.arange(12), idx.flatten())
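    # This is plain row-major flattening: for num_cols=4 the index is
    # row * 4 + col, e.g. (row=2, col=3) -> 11, which is why the expected
    # values are exactly np.arange(12).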
def test_get_valid_anchor_weights_in_flattened_image(self):
"""Tests that the anchor weights are valid upon flattening out."""
valid_weights = np.zeros((2, 5, 5), dtype=float)
valid_weights[0, :3, :4] = 1.0
valid_weights[1, :2, :2] = 1.0
def graph_fn():
true_image_shapes = tf.constant([[3, 4], [2, 2]])
w = cnma.get_valid_anchor_weights_in_flattened_image(
true_image_shapes, 5, 5)
return w
w = self.execute(graph_fn, [])
np.testing.assert_allclose(w, valid_weights.reshape(2, -1))
self.assertEqual((2, 25), w.shape)
def test_convert_strided_predictions_to_normalized_boxes(self):
"""Tests that boxes have correct coordinates in normalized input space."""
def graph_fn():
boxes = np.zeros((2, 3, 4), dtype=np.float32)
boxes[0] = [[10, 20, 30, 40], [20, 30, 50, 100], [50, 60, 100, 180]]
boxes[1] = [[-5, -5, 5, 5], [45, 60, 110, 120], [150, 150, 200, 250]]
true_image_shapes = tf.constant([[100, 90, 3], [150, 150, 3]])
clipped_boxes = (
cnma.convert_strided_predictions_to_normalized_boxes(
boxes, 2, true_image_shapes))
return clipped_boxes
clipped_boxes = self.execute(graph_fn, [])
expected_boxes = np.zeros((2, 3, 4), dtype=np.float32)
expected_boxes[0] = [[0.2, 4./9, 0.6, 8./9], [0.4, 2./3, 1, 1],
[1, 1, 1, 1]]
expected_boxes[1] = [[0., 0, 1./15, 1./15], [3./5, 4./5, 1, 1],
[1, 1, 1, 1]]
np.testing.assert_allclose(expected_boxes, clipped_boxes)
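    # Arithmetic for the first expected box: the strided box [10, 20, 30, 40]
    # with stride 2 and true shape (100, 90) normalizes to
    # [10*2/100, 20*2/90, 30*2/100, 40*2/90] = [0.2, 4/9, 0.6, 8/9]; results
    # are then clipped to the [0, 1] window, which produces the rows of ones.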
@parameterized.parameters(
{'clip_to_window': True},
{'clip_to_window': False}
)
def test_convert_strided_predictions_to_normalized_keypoints(
self, clip_to_window):
"""Tests that keypoints have correct coordinates in normalized coords."""
keypoint_coords_np = np.array(
[
# Example 0.
[
[[-10., 8.], [60., 22.], [60., 120.]],
[[20., 20.], [0., 0.], [0., 0.]],
],
# Example 1.
[
[[40., 50.], [20., 160.], [200., 150.]],
[[10., 0.], [40., 10.], [0., 0.]],
],
], dtype=np.float32)
keypoint_scores_np = np.array(
[
# Example 0.
[
[1.0, 0.9, 0.2],
[0.7, 0.0, 0.0],
],
# Example 1.
[
[1.0, 1.0, 0.2],
[0.7, 0.6, 0.0],
],
], dtype=np.float32)
def graph_fn():
keypoint_coords = tf.constant(keypoint_coords_np, dtype=tf.float32)
keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32)
true_image_shapes = tf.constant([[320, 400, 3], [640, 640, 3]])
stride = 4
keypoint_coords_out, keypoint_scores_out = (
cnma.convert_strided_predictions_to_normalized_keypoints(
keypoint_coords, keypoint_scores, stride, true_image_shapes,
clip_to_window))
return keypoint_coords_out, keypoint_scores_out
keypoint_coords_out, keypoint_scores_out = self.execute(graph_fn, [])
if clip_to_window:
expected_keypoint_coords_np = np.array(
[
# Example 0.
[
[[0.0, 0.08], [0.75, 0.22], [0.75, 1.0]],
[[0.25, 0.2], [0., 0.], [0.0, 0.0]],
],
# Example 1.
[
[[0.25, 0.3125], [0.125, 1.0], [1.0, 0.9375]],
[[0.0625, 0.], [0.25, 0.0625], [0., 0.]],
],
], dtype=np.float32)
expected_keypoint_scores_np = np.array(
[
# Example 0.
[
[0.0, 0.9, 0.0],
[0.7, 0.0, 0.0],
],
# Example 1.
[
[1.0, 1.0, 0.0],
[0.7, 0.6, 0.0],
],
], dtype=np.float32)
else:
expected_keypoint_coords_np = np.array(
[
# Example 0.
[
[[-0.125, 0.08], [0.75, 0.22], [0.75, 1.2]],
[[0.25, 0.2], [0., 0.], [0., 0.]],
],
# Example 1.
[
[[0.25, 0.3125], [0.125, 1.0], [1.25, 0.9375]],
[[0.0625, 0.], [0.25, 0.0625], [0., 0.]],
],
], dtype=np.float32)
expected_keypoint_scores_np = np.array(
[
# Example 0.
[
[1.0, 0.9, 0.2],
[0.7, 0.0, 0.0],
],
# Example 1.
[
[1.0, 1.0, 0.2],
[0.7, 0.6, 0.0],
],
], dtype=np.float32)
np.testing.assert_allclose(expected_keypoint_coords_np, keypoint_coords_out)
np.testing.assert_allclose(expected_keypoint_scores_np, keypoint_scores_out)
def test_convert_strided_predictions_to_instance_masks(self):
def graph_fn():
boxes = tf.constant(
[
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, 0.5, 1.0],
[0.0, 0.0, 0.0, 0.0]],
], tf.float32)
classes = tf.constant(
[
[0, 1, 0],
], tf.int32)
masks_np = np.zeros((1, 4, 4, 2), dtype=np.float32)
masks_np[0, :, 2:, 0] = 1 # Class 0.
masks_np[0, :, :3, 1] = 1 # Class 1.
masks = tf.constant(masks_np)
true_image_shapes = tf.constant([[6, 8, 3]])
instance_masks, _ = cnma.convert_strided_predictions_to_instance_masks(
boxes, classes, masks, stride=2, mask_height=2, mask_width=2,
true_image_shapes=true_image_shapes)
return instance_masks
instance_masks = self.execute_cpu(graph_fn, [])
expected_instance_masks = np.array(
[
[
# Mask 0 (class 0).
[[1, 1],
[1, 1]],
# Mask 1 (class 1).
[[1, 0],
[1, 0]],
# Mask 2 (class 0).
[[0, 0],
[0, 0]],
]
])
np.testing.assert_array_equal(expected_instance_masks, instance_masks)
def test_convert_strided_predictions_raises_error_with_one_tensor(self):
def graph_fn():
boxes = tf.constant(
[
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, 0.5, 1.0],
[0.0, 0.0, 0.0, 0.0]],
], tf.float32)
classes = tf.constant(
[
[0, 1, 0],
], tf.int32)
masks_np = np.zeros((1, 4, 4, 2), dtype=np.float32)
masks_np[0, :, 2:, 0] = 1 # Class 0.
masks_np[0, :, :3, 1] = 1 # Class 1.
masks = tf.constant(masks_np)
true_image_shapes = tf.constant([[6, 8, 3]])
densepose_part_heatmap = tf.random.uniform(
[1, 4, 4, 24])
instance_masks, _ = cnma.convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap=densepose_part_heatmap,
densepose_surface_coords=None)
return instance_masks
with self.assertRaises(ValueError):
self.execute_cpu(graph_fn, [])
def test_crop_and_threshold_masks(self):
boxes_np = np.array(
[[0., 0., 0.5, 0.5],
[0.25, 0.25, 1.0, 1.0]], dtype=np.float32)
classes_np = np.array([0, 2], dtype=np.int32)
masks_np = np.zeros((4, 4, _NUM_CLASSES), dtype=np.float32)
masks_np[0, 0, 0] = 0.8
masks_np[1, 1, 0] = 0.6
masks_np[3, 3, 2] = 0.7
part_heatmap_np = np.zeros((4, 4, _DENSEPOSE_NUM_PARTS), dtype=np.float32)
part_heatmap_np[0, 0, 4] = 1
part_heatmap_np[0, 0, 2] = 0.6 # Lower scoring.
part_heatmap_np[1, 1, 8] = 0.2
part_heatmap_np[3, 3, 4] = 0.5
surf_coords_np = np.zeros((4, 4, 2 * _DENSEPOSE_NUM_PARTS),
dtype=np.float32)
surf_coords_np[:, :, 8:10] = 0.2, 0.9
surf_coords_np[:, :, 16:18] = 0.3, 0.5
true_height, true_width = 10, 10
input_height, input_width = 10, 10
mask_height = 4
mask_width = 4
def graph_fn():
elems = [
tf.constant(boxes_np),
tf.constant(classes_np),
tf.constant(masks_np),
tf.constant(part_heatmap_np),
tf.constant(surf_coords_np),
tf.constant(true_height, dtype=tf.int32),
tf.constant(true_width, dtype=tf.int32)
]
part_masks, surface_coords = cnma.crop_and_threshold_masks(
elems, input_height, input_width, mask_height=mask_height,
mask_width=mask_width, densepose_class_index=0)
return part_masks, surface_coords
part_masks, surface_coords = self.execute_cpu(graph_fn, [])
expected_part_masks = np.zeros((2, 4, 4), dtype=np.uint8)
expected_part_masks[0, 0, 0] = 5 # Recall classes are 1-indexed in output.
expected_part_masks[0, 2, 2] = 9 # Recall classes are 1-indexed in output.
expected_part_masks[1, 3, 3] = 1 # Standard instance segmentation mask.
expected_surface_coords = np.zeros((2, 4, 4, 2), dtype=np.float32)
expected_surface_coords[0, 0, 0, :] = 0.2, 0.9
expected_surface_coords[0, 2, 2, :] = 0.3, 0.5
np.testing.assert_allclose(expected_part_masks, part_masks)
np.testing.assert_allclose(expected_surface_coords, surface_coords)
def test_gather_surface_coords_for_parts(self):
surface_coords_cropped_np = np.zeros((2, 5, 5, _DENSEPOSE_NUM_PARTS, 2),
dtype=np.float32)
surface_coords_cropped_np[0, 0, 0, 5] = 0.3, 0.4
surface_coords_cropped_np[0, 1, 0, 9] = 0.5, 0.6
highest_scoring_part_np = np.zeros((2, 5, 5), dtype=np.int32)
highest_scoring_part_np[0, 0, 0] = 5
highest_scoring_part_np[0, 1, 0] = 9
def graph_fn():
surface_coords_cropped = tf.constant(surface_coords_cropped_np,
tf.float32)
highest_scoring_part = tf.constant(highest_scoring_part_np, tf.int32)
surface_coords_gathered = cnma.gather_surface_coords_for_parts(
surface_coords_cropped, highest_scoring_part)
return surface_coords_gathered
surface_coords_gathered = self.execute_cpu(graph_fn, [])
np.testing.assert_allclose([0.3, 0.4], surface_coords_gathered[0, 0, 0])
np.testing.assert_allclose([0.5, 0.6], surface_coords_gathered[0, 1, 0])
def test_top_k_feature_map_locations(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 1] = 1.0
    feature_map_np[0, 2, 1, 1] = 0.9  # Gets filtered due to max pool.
feature_map_np[0, 0, 1, 0] = 0.7
feature_map_np[0, 2, 2, 0] = 0.5
feature_map_np[0, 2, 2, 1] = -0.3
feature_map_np[1, 2, 1, 1] = 0.7
feature_map_np[1, 1, 0, 0] = 0.4
feature_map_np[1, 1, 2, 0] = 0.1
def graph_fn():
feature_map = tf.constant(feature_map_np)
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=3, k=3))
return scores, y_inds, x_inds, channel_inds
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
np.testing.assert_allclose([1.0, 0.7, 0.5], scores[0])
np.testing.assert_array_equal([2, 0, 2], y_inds[0])
np.testing.assert_array_equal([0, 1, 2], x_inds[0])
np.testing.assert_array_equal([1, 0, 0], channel_inds[0])
np.testing.assert_allclose([0.7, 0.4, 0.1], scores[1])
np.testing.assert_array_equal([2, 1, 1], y_inds[1])
np.testing.assert_array_equal([1, 0, 2], x_inds[1])
np.testing.assert_array_equal([1, 0, 0], channel_inds[1])
def test_top_k_feature_map_locations_no_pooling(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 1] = 1.0
feature_map_np[0, 2, 1, 1] = 0.9
feature_map_np[0, 0, 1, 0] = 0.7
feature_map_np[0, 2, 2, 0] = 0.5
feature_map_np[0, 2, 2, 1] = -0.3
feature_map_np[1, 2, 1, 1] = 0.7
feature_map_np[1, 1, 0, 0] = 0.4
feature_map_np[1, 1, 2, 0] = 0.1
def graph_fn():
feature_map = tf.constant(feature_map_np)
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=1, k=3))
return scores, y_inds, x_inds, channel_inds
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
np.testing.assert_allclose([1.0, 0.9, 0.7], scores[0])
np.testing.assert_array_equal([2, 2, 0], y_inds[0])
np.testing.assert_array_equal([0, 1, 1], x_inds[0])
np.testing.assert_array_equal([1, 1, 0], channel_inds[0])
np.testing.assert_allclose([0.7, 0.4, 0.1], scores[1])
np.testing.assert_array_equal([2, 1, 1], y_inds[1])
np.testing.assert_array_equal([1, 0, 2], x_inds[1])
np.testing.assert_array_equal([1, 0, 0], channel_inds[1])
def test_top_k_feature_map_locations_very_large(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 1] = 1.0
def graph_fn():
feature_map = tf.constant(feature_map_np)
feature_map.set_shape(tf.TensorShape([2, 3, None, 2]))
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=1, k=3000))
return scores, y_inds, x_inds, channel_inds
# graph execution will fail if large k's are not handled.
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
self.assertEqual(scores.shape, (2, 18))
self.assertEqual(y_inds.shape, (2, 18))
self.assertEqual(x_inds.shape, (2, 18))
self.assertEqual(channel_inds.shape, (2, 18))
def test_top_k_feature_map_locations_per_channel(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 0] = 1.0 # Selected.
    feature_map_np[0, 2, 1, 0] = 0.9  # Gets filtered due to max pool.
feature_map_np[0, 0, 1, 0] = 0.7 # Selected.
feature_map_np[0, 2, 2, 1] = 0.5 # Selected.
feature_map_np[0, 0, 0, 1] = 0.3 # Selected.
feature_map_np[1, 2, 1, 0] = 0.7 # Selected.
    feature_map_np[1, 1, 0, 0] = 0.4  # Gets filtered due to max pool.
    feature_map_np[1, 1, 2, 0] = 0.3  # Gets filtered due to max pool.
feature_map_np[1, 1, 0, 1] = 0.8 # Selected.
feature_map_np[1, 1, 2, 1] = 0.3 # Selected.
def graph_fn():
feature_map = tf.constant(feature_map_np)
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=3, k=2, per_channel=True))
return scores, y_inds, x_inds, channel_inds
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
np.testing.assert_allclose([1.0, 0.7, 0.5, 0.3], scores[0])
np.testing.assert_array_equal([2, 0, 2, 0], y_inds[0])
np.testing.assert_array_equal([0, 1, 2, 0], x_inds[0])
np.testing.assert_array_equal([0, 0, 1, 1], channel_inds[0])
np.testing.assert_allclose([0.7, 0.0, 0.8, 0.3], scores[1])
np.testing.assert_array_equal([2, 0, 1, 1], y_inds[1])
np.testing.assert_array_equal([1, 0, 0, 2], x_inds[1])
np.testing.assert_array_equal([0, 0, 1, 1], channel_inds[1])
def test_top_k_feature_map_locations_k1(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 0] = 1.0 # Selected.
feature_map_np[0, 2, 1, 0] = 0.9
feature_map_np[0, 0, 1, 0] = 0.7
feature_map_np[0, 2, 2, 1] = 0.5
feature_map_np[0, 0, 0, 1] = 0.3
feature_map_np[1, 2, 1, 0] = 0.7
feature_map_np[1, 1, 0, 0] = 0.4
feature_map_np[1, 1, 2, 0] = 0.3
feature_map_np[1, 1, 0, 1] = 0.8 # Selected.
feature_map_np[1, 1, 2, 1] = 0.3
def graph_fn():
feature_map = tf.constant(feature_map_np)
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=3, k=1, per_channel=False))
return scores, y_inds, x_inds, channel_inds
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
np.testing.assert_allclose([1.0], scores[0])
np.testing.assert_array_equal([2], y_inds[0])
np.testing.assert_array_equal([0], x_inds[0])
np.testing.assert_array_equal([0], channel_inds[0])
np.testing.assert_allclose([0.8], scores[1])
np.testing.assert_array_equal([1], y_inds[1])
np.testing.assert_array_equal([0], x_inds[1])
np.testing.assert_array_equal([1], channel_inds[1])
def test_top_k_feature_map_locations_k1_per_channel(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 0] = 1.0 # Selected.
feature_map_np[0, 2, 1, 0] = 0.9
feature_map_np[0, 0, 1, 0] = 0.7
feature_map_np[0, 2, 2, 1] = 0.5 # Selected.
feature_map_np[0, 0, 0, 1] = 0.3
feature_map_np[1, 2, 1, 0] = 0.7 # Selected.
feature_map_np[1, 1, 0, 0] = 0.4
feature_map_np[1, 1, 2, 0] = 0.3
feature_map_np[1, 1, 0, 1] = 0.8 # Selected.
feature_map_np[1, 1, 2, 1] = 0.3
def graph_fn():
feature_map = tf.constant(feature_map_np)
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=3, k=1, per_channel=True))
return scores, y_inds, x_inds, channel_inds
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
np.testing.assert_allclose([1.0, 0.5], scores[0])
np.testing.assert_array_equal([2, 2], y_inds[0])
np.testing.assert_array_equal([0, 2], x_inds[0])
np.testing.assert_array_equal([0, 1], channel_inds[0])
np.testing.assert_allclose([0.7, 0.8], scores[1])
np.testing.assert_array_equal([2, 1], y_inds[1])
np.testing.assert_array_equal([1, 0], x_inds[1])
np.testing.assert_array_equal([0, 1], channel_inds[1])
def test_box_prediction(self):
class_pred = np.zeros((3, 128, 128, 5), dtype=np.float32)
hw_pred = np.zeros((3, 128, 128, 2), dtype=np.float32)
offset_pred = np.zeros((3, 128, 128, 2), dtype=np.float32)
# Sample 1, 2 boxes
class_pred[0, 10, 20] = [0.3, .7, 0.0, 0.0, 0.0]
hw_pred[0, 10, 20] = [40, 60]
offset_pred[0, 10, 20] = [1, 2]
class_pred[0, 50, 60] = [0.55, 0.0, 0.0, 0.0, 0.45]
hw_pred[0, 50, 60] = [50, 50]
offset_pred[0, 50, 60] = [0, 0]
# Sample 2, 2 boxes (at same location)
class_pred[1, 100, 100] = [0.0, 0.1, 0.9, 0.0, 0.0]
hw_pred[1, 100, 100] = [10, 10]
offset_pred[1, 100, 100] = [1, 3]
# Sample 3, 3 boxes
class_pred[2, 60, 90] = [0.0, 0.0, 0.0, 0.2, 0.8]
hw_pred[2, 60, 90] = [40, 30]
offset_pred[2, 60, 90] = [0, 0]
class_pred[2, 65, 95] = [0.0, 0.7, 0.3, 0.0, 0.0]
hw_pred[2, 65, 95] = [20, 20]
offset_pred[2, 65, 95] = [1, 2]
class_pred[2, 75, 85] = [1.0, 0.0, 0.0, 0.0, 0.0]
hw_pred[2, 75, 85] = [21, 25]
offset_pred[2, 75, 85] = [5, 2]
def graph_fn():
class_pred_tensor = tf.constant(class_pred)
hw_pred_tensor = tf.constant(hw_pred)
offset_pred_tensor = tf.constant(offset_pred)
_, y_indices, x_indices, _ = (
cnma.top_k_feature_map_locations(
class_pred_tensor, max_pool_kernel_size=3, k=2))
boxes = cnma.prediction_tensors_to_boxes(
y_indices, x_indices, hw_pred_tensor, offset_pred_tensor)
return boxes
boxes = self.execute(graph_fn, [])
np.testing.assert_allclose(
[[0, 0, 31, 52], [25, 35, 75, 85]], boxes[0])
np.testing.assert_allclose(
[[96, 98, 106, 108], [96, 98, 106, 108]], boxes[1])
np.testing.assert_allclose(
[[69.5, 74.5, 90.5, 99.5], [40, 75, 80, 105]], boxes[2])
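    # Arithmetic for boxes[0][1] above: the peak at (y=50, x=60) with offset
    # (0, 0) and height/width (50, 50) decodes to
    # [50 - 25, 60 - 25, 50 + 25, 60 + 25] = [25, 35, 75, 85].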
def test_offset_prediction(self):
class_pred = np.zeros((3, 128, 128, 5), dtype=np.float32)
offset_pred = np.zeros((3, 128, 128, 2), dtype=np.float32)
# Sample 1, 2 boxes
class_pred[0, 10, 20] = [0.3, .7, 0.0, 0.0, 0.0]
offset_pred[0, 10, 20] = [1, 2]
class_pred[0, 50, 60] = [0.55, 0.0, 0.0, 0.0, 0.45]
offset_pred[0, 50, 60] = [0, 0]
# Sample 2, 2 boxes (at same location)
class_pred[1, 100, 100] = [0.0, 0.1, 0.9, 0.0, 0.0]
offset_pred[1, 100, 100] = [1, 3]
# Sample 3, 3 boxes
class_pred[2, 60, 90] = [0.0, 0.0, 0.0, 0.2, 0.8]
offset_pred[2, 60, 90] = [0, 0]
class_pred[2, 65, 95] = [0.0, 0.7, 0.3, 0.0, 0.0]
offset_pred[2, 65, 95] = [1, 2]
class_pred[2, 75, 85] = [1.0, 0.0, 0.0, 0.0, 0.0]
offset_pred[2, 75, 85] = [5, 2]
def graph_fn():
class_pred_tensor = tf.constant(class_pred)
offset_pred_tensor = tf.constant(offset_pred)
_, y_indices, x_indices, _ = (
cnma.top_k_feature_map_locations(
class_pred_tensor, max_pool_kernel_size=3, k=2))
offsets = cnma.prediction_tensors_to_temporal_offsets(
y_indices, x_indices, offset_pred_tensor)
return offsets
offsets = self.execute(graph_fn, [])
np.testing.assert_allclose(
[[1, 2], [0, 0]], offsets[0])
np.testing.assert_allclose(
[[1, 3], [1, 3]], offsets[1])
np.testing.assert_allclose(
[[5, 2], [0, 0]], offsets[2])
def test_keypoint_candidate_prediction(self):
keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
keypoint_heatmap_np[0, 0, 0, 0] = 1.0
keypoint_heatmap_np[0, 2, 1, 0] = 0.7
keypoint_heatmap_np[0, 1, 1, 0] = 0.6
keypoint_heatmap_np[0, 0, 2, 1] = 0.7
keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score.
keypoint_heatmap_np[0, 2, 2, 1] = 0.2
keypoint_heatmap_np[1, 1, 0, 0] = 0.6
keypoint_heatmap_np[1, 2, 1, 0] = 0.5
keypoint_heatmap_np[1, 0, 0, 0] = 0.4
keypoint_heatmap_np[1, 0, 0, 1] = 1.0
keypoint_heatmap_np[1, 0, 1, 1] = 0.9
keypoint_heatmap_np[1, 2, 0, 1] = 0.8
keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25]
keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5]
keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0]
keypoint_heatmap_offsets_np[0, 0, 2] = [1.0, 0.0]
keypoint_heatmap_offsets_np[0, 2, 2] = [1.0, 1.0]
keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5]
keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0]
keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, -0.5]
keypoint_heatmap_offsets_np[1, 0, 1] = [0.5, -0.5]
keypoint_heatmap_offsets_np[1, 2, 0] = [-1.0, -0.5]
def graph_fn():
keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32)
keypoint_heatmap_offsets = tf.constant(
keypoint_heatmap_offsets_np, dtype=tf.float32)
(keypoint_cands, keypoint_scores, num_keypoint_candidates, _) = (
cnma.prediction_tensors_to_keypoint_candidates(
keypoint_heatmap,
keypoint_heatmap_offsets,
keypoint_score_threshold=0.5,
max_pool_kernel_size=1,
max_candidates=2))
return keypoint_cands, keypoint_scores, num_keypoint_candidates
(keypoint_cands, keypoint_scores,
num_keypoint_candidates) = self.execute(graph_fn, [])
expected_keypoint_candidates = [
[ # Example 0.
[[0.5, 0.25], [1.0, 2.0]], # Keypoint 1.
[[1.75, 1.5], [1.0, 1.0]], # Keypoint 2.
],
[ # Example 1.
[[1.25, 0.5], [0.0, -0.5]], # Keypoint 1.
[[2.5, 1.0], [0.5, 0.5]], # Keypoint 2.
],
]
expected_keypoint_scores = [
[ # Example 0.
[1.0, 0.7], # Keypoint 1.
[0.7, 0.3], # Keypoint 2.
],
[ # Example 1.
[0.6, 1.0], # Keypoint 1.
[0.5, 0.9], # Keypoint 2.
],
]
expected_num_keypoint_candidates = [
[2, 1],
[2, 2]
]
np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands)
np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores)
np.testing.assert_array_equal(expected_num_keypoint_candidates,
num_keypoint_candidates)
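    # Candidate coordinates are heatmap peak locations plus their offsets:
    # for keypoint 0 of example 0, the peaks at (0, 0) and (2, 1) with offsets
    # (0.5, 0.25) and (-0.25, 0.5) become [0.5, 0.25] and [1.75, 1.5] above.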
def test_prediction_to_single_instance_keypoints(self):
image_size = (9, 9)
object_heatmap_np = np.zeros((1, image_size[0], image_size[1], 1),
dtype=np.float32)
# This should be picked.
object_heatmap_np[0, 4, 4, 0] = 0.9
# This shouldn't be picked since it's farther away from the center.
object_heatmap_np[0, 2, 2, 0] = 1.0
keypoint_heatmap_np = np.zeros((1, image_size[0], image_size[1], 4),
dtype=np.float32)
# Top-left corner should be picked.
keypoint_heatmap_np[0, 1, 1, 0] = 0.9
keypoint_heatmap_np[0, 4, 4, 0] = 1.0
# Top-right corner should be picked.
keypoint_heatmap_np[0, 1, 7, 1] = 0.9
keypoint_heatmap_np[0, 4, 4, 1] = 1.0
# Bottom-left corner should be picked.
keypoint_heatmap_np[0, 7, 1, 2] = 0.9
keypoint_heatmap_np[0, 4, 4, 2] = 1.0
# Bottom-right corner should be picked.
keypoint_heatmap_np[0, 7, 7, 3] = 0.9
keypoint_heatmap_np[0, 4, 4, 3] = 1.0
keypoint_offset_np = np.zeros((1, image_size[0], image_size[1], 8),
dtype=np.float32)
keypoint_offset_np[0, 1, 1] = [0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
keypoint_offset_np[0, 1, 7] = [0.0, 0.0, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0]
keypoint_offset_np[0, 7, 1] = [0.0, 0.0, 0.0, 0.0, -0.5, 0.5, 0.0, 0.0]
keypoint_offset_np[0, 7, 7] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5, -0.5]
keypoint_regression_np = np.zeros((1, image_size[0], image_size[1], 8),
dtype=np.float32)
keypoint_regression_np[0, 4, 4] = [-3, -3, -3, 3, 3, -3, 3, 3]
kp_params = get_fake_kp_params(
candidate_ranking_mode='score_distance_ratio')
def graph_fn():
object_heatmap = tf.constant(object_heatmap_np, dtype=tf.float32)
keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32)
keypoint_offset = tf.constant(keypoint_offset_np, dtype=tf.float32)
keypoint_regression = tf.constant(
keypoint_regression_np, dtype=tf.float32)
(keypoint_cands, keypoint_scores, _) = (
cnma.prediction_to_single_instance_keypoints(
object_heatmap,
keypoint_heatmap,
keypoint_offset,
keypoint_regression,
kp_params=kp_params))
return keypoint_cands, keypoint_scores
(keypoint_cands, keypoint_scores) = self.execute(graph_fn, [])
expected_keypoint_candidates = [[[
[1.5, 1.5], # top-left
[1.5, 6.5], # top-right
[6.5, 1.5], # bottom-left
[6.5, 6.5], # bottom-right
]]]
expected_keypoint_scores = [[[0.9, 0.9, 0.9, 0.9]]]
np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands)
np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores)
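    # How the expected top-left corner arises: the chosen center (4, 4) plus
    # its regression (-3, -3) gives a regressed keypoint at (1, 1), and the
    # nearby heatmap candidate at (1, 1) with offset (0.5, 0.5) yields
    # (1.5, 1.5); the other corners follow the same pattern.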
@parameterized.parameters({'provide_keypoint_score': True},
{'provide_keypoint_score': False})
def test_prediction_to_multi_instance_keypoints(self, provide_keypoint_score):
image_size = (9, 9)
keypoint_heatmap_np = np.zeros((image_size[0], image_size[1], 3, 4),
dtype=np.float32)
# Instance 0.
keypoint_heatmap_np[1, 1, 0, 0] = 0.9
keypoint_heatmap_np[1, 7, 0, 1] = 0.9
keypoint_heatmap_np[7, 1, 0, 2] = 0.9
keypoint_heatmap_np[7, 7, 0, 3] = 0.9
# Instance 1.
keypoint_heatmap_np[2, 2, 1, 0] = 0.8
keypoint_heatmap_np[2, 8, 1, 1] = 0.8
keypoint_heatmap_np[8, 2, 1, 2] = 0.8
keypoint_heatmap_np[8, 8, 1, 3] = 0.8
keypoint_offset_np = np.zeros((image_size[0], image_size[1], 8),
dtype=np.float32)
keypoint_offset_np[1, 1] = [0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
keypoint_offset_np[1, 7] = [0.0, 0.0, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0]
keypoint_offset_np[7, 1] = [0.0, 0.0, 0.0, 0.0, -0.5, 0.5, 0.0, 0.0]
keypoint_offset_np[7, 7] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5, -0.5]
keypoint_offset_np[2, 2] = [0.3, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
keypoint_offset_np[2, 8] = [0.0, 0.0, 0.3, -0.3, 0.0, 0.0, 0.0, 0.0]
keypoint_offset_np[8, 2] = [0.0, 0.0, 0.0, 0.0, -0.3, 0.3, 0.0, 0.0]
keypoint_offset_np[8, 8] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.3, -0.3]
def graph_fn():
keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32)
keypoint_offset = tf.constant(keypoint_offset_np, dtype=tf.float32)
if provide_keypoint_score:
(keypoint_cands, keypoint_scores) = (
cnma.prediction_tensors_to_multi_instance_kpts(
keypoint_heatmap,
keypoint_offset,
tf.reduce_max(keypoint_heatmap, axis=2)))
else:
(keypoint_cands, keypoint_scores) = (
cnma.prediction_tensors_to_multi_instance_kpts(
keypoint_heatmap,
keypoint_offset))
return keypoint_cands, keypoint_scores
(keypoint_cands, keypoint_scores) = self.execute(graph_fn, [])
expected_keypoint_candidates_0 = [
[1.5, 1.5], # top-left
[1.5, 6.5], # top-right
[6.5, 1.5], # bottom-left
[6.5, 6.5], # bottom-right
]
expected_keypoint_scores_0 = [0.9, 0.9, 0.9, 0.9]
expected_keypoint_candidates_1 = [
[2.3, 2.3], # top-left
[2.3, 7.7], # top-right
[7.7, 2.3], # bottom-left
[7.7, 7.7], # bottom-right
]
expected_keypoint_scores_1 = [0.8, 0.8, 0.8, 0.8]
np.testing.assert_allclose(
expected_keypoint_candidates_0, keypoint_cands[0, 0, :, :])
np.testing.assert_allclose(
expected_keypoint_candidates_1, keypoint_cands[0, 1, :, :])
np.testing.assert_allclose(
expected_keypoint_scores_0, keypoint_scores[0, 0, :])
np.testing.assert_allclose(
expected_keypoint_scores_1, keypoint_scores[0, 1, :])
def test_keypoint_candidate_prediction_per_keypoints(self):
keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
keypoint_heatmap_np[0, 0, 0, 0] = 1.0
keypoint_heatmap_np[0, 2, 1, 0] = 0.7
keypoint_heatmap_np[0, 1, 1, 0] = 0.6
keypoint_heatmap_np[0, 0, 2, 1] = 0.7
keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score.
keypoint_heatmap_np[0, 2, 2, 1] = 0.2
keypoint_heatmap_np[1, 1, 0, 0] = 0.6
keypoint_heatmap_np[1, 2, 1, 0] = 0.5
keypoint_heatmap_np[1, 0, 0, 0] = 0.4
keypoint_heatmap_np[1, 0, 0, 1] = 1.0
keypoint_heatmap_np[1, 0, 1, 1] = 0.9
keypoint_heatmap_np[1, 2, 0, 1] = 0.8
# Note that the keypoint offsets are now per keypoint (as opposed to
# keypoint agnostic, in the test test_keypoint_candidate_prediction).
keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 4), dtype=np.float32)
keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25, 0.0, 0.0]
keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5, 0.0, 0.0]
keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0, 0.0, 0.0]
keypoint_heatmap_offsets_np[0, 0, 2] = [0.0, 0.0, 1.0, 0.0]
keypoint_heatmap_offsets_np[0, 2, 2] = [0.0, 0.0, 1.0, 1.0]
keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5, 0.0, 0.0]
keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0, 0.0, 0.0]
keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, 0.0, 0.0, -0.5]
keypoint_heatmap_offsets_np[1, 0, 1] = [0.0, 0.0, 0.5, -0.5]
keypoint_heatmap_offsets_np[1, 2, 0] = [0.0, 0.0, -1.0, -0.5]
def graph_fn():
keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32)
keypoint_heatmap_offsets = tf.constant(
keypoint_heatmap_offsets_np, dtype=tf.float32)
(keypoint_cands, keypoint_scores, num_keypoint_candidates, _) = (
cnma.prediction_tensors_to_keypoint_candidates(
keypoint_heatmap,
keypoint_heatmap_offsets,
keypoint_score_threshold=0.5,
max_pool_kernel_size=1,
max_candidates=2))
return keypoint_cands, keypoint_scores, num_keypoint_candidates
(keypoint_cands, keypoint_scores,
num_keypoint_candidates) = self.execute(graph_fn, [])
expected_keypoint_candidates = [
[ # Example 0.
[[0.5, 0.25], [1.0, 2.0]], # Candidate 1 of keypoint 1, 2.
[[1.75, 1.5], [1.0, 1.0]], # Candidate 2 of keypoint 1, 2.
],
[ # Example 1.
[[1.25, 0.5], [0.0, -0.5]], # Candidate 1 of keypoint 1, 2.
[[2.5, 1.0], [0.5, 0.5]], # Candidate 2 of keypoint 1, 2.
],
]
expected_keypoint_scores = [
[ # Example 0.
[1.0, 0.7], # Candidate 1 scores of keypoint 1, 2.
[0.7, 0.3], # Candidate 2 scores of keypoint 1, 2.
],
[ # Example 1.
[0.6, 1.0], # Candidate 1 scores of keypoint 1, 2.
[0.5, 0.9], # Candidate 2 scores of keypoint 1, 2.
],
]
expected_num_keypoint_candidates = [
[2, 1],
[2, 2]
]
np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands)
np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores)
np.testing.assert_array_equal(expected_num_keypoint_candidates,
num_keypoint_candidates)
@parameterized.parameters({'per_keypoint_depth': True},
{'per_keypoint_depth': False})
def test_keypoint_candidate_prediction_depth(self, per_keypoint_depth):
keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
keypoint_heatmap_np[0, 0, 0, 0] = 1.0
keypoint_heatmap_np[0, 2, 1, 0] = 0.7
keypoint_heatmap_np[0, 1, 1, 0] = 0.6
keypoint_heatmap_np[0, 0, 2, 1] = 0.7
keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score.
keypoint_heatmap_np[0, 2, 2, 1] = 0.2
keypoint_heatmap_np[1, 1, 0, 0] = 0.6
keypoint_heatmap_np[1, 2, 1, 0] = 0.5
keypoint_heatmap_np[1, 0, 0, 0] = 0.4
keypoint_heatmap_np[1, 0, 0, 1] = 1.0
keypoint_heatmap_np[1, 0, 1, 1] = 0.9
keypoint_heatmap_np[1, 2, 0, 1] = 0.8
if per_keypoint_depth:
keypoint_depths_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
keypoint_depths_np[0, 0, 0, 0] = -1.5
keypoint_depths_np[0, 2, 1, 0] = -1.0
keypoint_depths_np[0, 0, 2, 1] = 1.5
else:
keypoint_depths_np = np.zeros((2, 3, 3, 1), dtype=np.float32)
keypoint_depths_np[0, 0, 0, 0] = -1.5
keypoint_depths_np[0, 2, 1, 0] = -1.0
keypoint_depths_np[0, 0, 2, 0] = 1.5
keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25]
keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5]
keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0]
keypoint_heatmap_offsets_np[0, 0, 2] = [1.0, 0.0]
keypoint_heatmap_offsets_np[0, 2, 2] = [1.0, 1.0]
keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5]
keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0]
keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, -0.5]
keypoint_heatmap_offsets_np[1, 0, 1] = [0.5, -0.5]
keypoint_heatmap_offsets_np[1, 2, 0] = [-1.0, -0.5]
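    # Keypoint depths are gathered at the same heatmap peak locations as the
    # keypoint candidates. With per_keypoint_depth=True the depth channel
    # matches the keypoint type; otherwise the single shared channel is used,
    # so both branches above produce the same expected depths.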
def graph_fn():
keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32)
keypoint_heatmap_offsets = tf.constant(
keypoint_heatmap_offsets_np, dtype=tf.float32)
keypoint_depths = tf.constant(keypoint_depths_np, dtype=tf.float32)
(keypoint_cands, keypoint_scores, num_keypoint_candidates,
keypoint_depths) = (
cnma.prediction_tensors_to_keypoint_candidates(
keypoint_heatmap,
keypoint_heatmap_offsets,
keypoint_score_threshold=0.5,
max_pool_kernel_size=1,
max_candidates=2,
keypoint_depths=keypoint_depths))
return (keypoint_cands, keypoint_scores, num_keypoint_candidates,
keypoint_depths)
(_, keypoint_scores, _, keypoint_depths) = self.execute(graph_fn, [])
expected_keypoint_scores = [
[ # Example 0.
            [1.0, 0.7], # Candidate 1 scores of keypoint 1, 2.
            [0.7, 0.3], # Candidate 2 scores of keypoint 1, 2.
],
[ # Example 1.
            [0.6, 1.0], # Candidate 1 scores of keypoint 1, 2.
            [0.5, 0.9], # Candidate 2 scores of keypoint 1, 2.
],
]
expected_keypoint_depths = [
[
[-1.5, 1.5],
[-1.0, 0.0],
],
[
[0., 0.],
[0., 0.],
],
]
np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores)
np.testing.assert_allclose(expected_keypoint_depths, keypoint_depths)
def test_regressed_keypoints_at_object_centers(self):
batch_size = 2
num_keypoints = 5
num_instances = 6
regressed_keypoint_feature_map_np = np.random.randn(
batch_size, 10, 10, 2 * num_keypoints).astype(np.float32)
y_indices = np.random.choice(10, (batch_size, num_instances))
x_indices = np.random.choice(10, (batch_size, num_instances))
offsets = np.stack([y_indices, x_indices], axis=2).astype(np.float32)
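    # The feature map stores keypoint coordinates relative to the object
    # center, so the expected output adds the (y, x) center indices back onto
    # the gathered values (see the offsets added below).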
def graph_fn():
regressed_keypoint_feature_map = tf.constant(
regressed_keypoint_feature_map_np, dtype=tf.float32)
gathered_regressed_keypoints = (
cnma.regressed_keypoints_at_object_centers(
regressed_keypoint_feature_map,
tf.constant(y_indices, dtype=tf.int32),
tf.constant(x_indices, dtype=tf.int32)))
return gathered_regressed_keypoints
gathered_regressed_keypoints = self.execute(graph_fn, [])
expected_gathered_keypoints_0 = regressed_keypoint_feature_map_np[
0, y_indices[0], x_indices[0], :]
expected_gathered_keypoints_1 = regressed_keypoint_feature_map_np[
1, y_indices[1], x_indices[1], :]
expected_gathered_keypoints = np.stack([
expected_gathered_keypoints_0,
expected_gathered_keypoints_1], axis=0)
expected_gathered_keypoints = np.reshape(
expected_gathered_keypoints,
[batch_size, num_instances, num_keypoints, 2])
expected_gathered_keypoints += np.expand_dims(offsets, axis=2)
expected_gathered_keypoints = np.reshape(
expected_gathered_keypoints,
[batch_size, num_instances, -1])
np.testing.assert_allclose(expected_gathered_keypoints,
gathered_regressed_keypoints)
@parameterized.parameters(
{'candidate_ranking_mode': 'min_distance'},
{'candidate_ranking_mode': 'score_distance_ratio'},
)
def test_refine_keypoints(self, candidate_ranking_mode):
regressed_keypoints_np = np.array(
[
# Example 0.
[
[[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]], # Instance 0.
[[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]], # Instance 1.
],
# Example 1.
[
[[6.0, 2.0], [0.0, 0.0], [0.1, 0.1]], # Instance 0.
[[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1.
],
], dtype=np.float32)
keypoint_candidates_np = np.array(
[
# Example 0.
[
[[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Candidate 0.
[[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]], # Candidate 1.
[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], # Candidate 2.
],
# Example 1.
[
[[6.0, 1.5], [0.1, 0.4], [0.0, 0.0]], # Candidate 0.
[[1.0, 4.0], [0.0, 0.3], [0.0, 0.0]], # Candidate 1.
[[0.0, 0.0], [0.1, 0.3], [0.0, 0.0]], # Candidate 2.
]
], dtype=np.float32)
keypoint_scores_np = np.array(
[
# Example 0.
[
[0.8, 0.9, 1.0], # Candidate 0.
[0.6, 0.1, 0.9], # Candidate 1.
                [0.0, 0.0, 0.0], # Candidate 2.
],
# Example 1.
[
[0.7, 0.3, 0.0], # Candidate 0.
[0.6, 0.1, 0.0], # Candidate 1.
                [0.0, 0.28, 0.0], # Candidate 2.
]
], dtype=np.float32)
num_keypoints_candidates_np = np.array(
[
# Example 0.
[2, 2, 2],
# Example 1.
[2, 3, 0],
], dtype=np.int32)
unmatched_keypoint_score = 0.1
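    # In 'min_distance' mode the candidate nearest to each regressed keypoint
    # wins; in 'score_distance_ratio' mode a farther candidate with a higher
    # heatmap score can win instead, which is why Example 1 / keypoint 1
    # resolves to (0.1, 0.3) with score 0.28 rather than (0.0, 0.3) in the
    # expectations below.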
def graph_fn():
regressed_keypoints = tf.constant(
regressed_keypoints_np, dtype=tf.float32)
keypoint_candidates = tf.constant(
keypoint_candidates_np, dtype=tf.float32)
keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32)
num_keypoint_candidates = tf.constant(num_keypoints_candidates_np,
dtype=tf.int32)
      # bboxes=None now behaves differently, so we derive the bboxes explicitly
      # from the regressed keypoints to reproduce the original behavior.
regressed_keypoints_flattened = tf.reshape(
regressed_keypoints, [-1, 3, 2])
bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes(
regressed_keypoints_flattened)
(refined_keypoints, refined_scores, _) = cnma.refine_keypoints(
regressed_keypoints,
keypoint_candidates,
keypoint_scores,
num_keypoint_candidates,
bboxes=bboxes_flattened,
unmatched_keypoint_score=unmatched_keypoint_score,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode=candidate_ranking_mode)
return refined_keypoints, refined_scores
refined_keypoints, refined_scores = self.execute(graph_fn, [])
if candidate_ranking_mode == 'min_distance':
expected_refined_keypoints = np.array(
[
# Example 0.
[
[[2.0, 2.5], [6.0, 10.5], [14.0, 7.0]], # Instance 0.
[[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1.
],
# Example 1.
[
[[6.0, 1.5], [0.0, 0.3], [0.1, 0.1]], # Instance 0.
[[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1.
],
], dtype=np.float32)
expected_refined_scores = np.array(
[
# Example 0.
[
[0.8, 0.9, unmatched_keypoint_score], # Instance 0.
[unmatched_keypoint_score, # Instance 1.
unmatched_keypoint_score, 1.0],
],
# Example 1.
[
[0.7, 0.1, unmatched_keypoint_score], # Instance 0.
[unmatched_keypoint_score, # Instance 1.
0.1, unmatched_keypoint_score],
],
], dtype=np.float32)
else:
expected_refined_keypoints = np.array(
[
# Example 0.
[
[[2.0, 2.5], [6.0, 10.5], [14.0, 7.0]], # Instance 0.
[[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1.
],
# Example 1.
[
[[6.0, 1.5], [0.1, 0.3], [0.1, 0.1]], # Instance 0.
[[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1.
],
], dtype=np.float32)
expected_refined_scores = np.array(
[
# Example 0.
[
[0.8, 0.9, unmatched_keypoint_score], # Instance 0.
[unmatched_keypoint_score, # Instance 1.
unmatched_keypoint_score, 1.0],
],
# Example 1.
[
[0.7, 0.28, unmatched_keypoint_score], # Instance 0.
[unmatched_keypoint_score, # Instance 1.
0.1, unmatched_keypoint_score],
],
], dtype=np.float32)
np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints)
np.testing.assert_allclose(expected_refined_scores, refined_scores)
def test_refine_keypoints_with_empty_regressed_keypoints(self):
regressed_keypoints_np = np.zeros((1, 0, 2, 2), dtype=np.float32)
keypoint_candidates_np = np.ones((1, 1, 2, 2), dtype=np.float32)
keypoint_scores_np = np.ones((1, 1, 2), dtype=np.float32)
num_keypoints_candidates_np = np.ones((1, 1), dtype=np.int32)
unmatched_keypoint_score = 0.1
def graph_fn():
regressed_keypoints = tf.constant(
regressed_keypoints_np, dtype=tf.float32)
keypoint_candidates = tf.constant(
keypoint_candidates_np, dtype=tf.float32)
keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32)
num_keypoint_candidates = tf.constant(num_keypoints_candidates_np,
dtype=tf.int32)
      # bboxes=None now behaves differently, so we derive the bboxes explicitly
      # from the regressed keypoints to reproduce the original behavior.
regressed_keypoints_flattened = tf.reshape(
regressed_keypoints, [-1, 3, 2])
bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes(
regressed_keypoints_flattened)
(refined_keypoints, refined_scores, _) = cnma.refine_keypoints(
regressed_keypoints,
keypoint_candidates,
keypoint_scores,
num_keypoint_candidates,
bboxes=bboxes_flattened,
unmatched_keypoint_score=unmatched_keypoint_score,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance')
return refined_keypoints, refined_scores
refined_keypoints, refined_scores = self.execute(graph_fn, [])
self.assertEqual(refined_keypoints.shape, (1, 0, 2, 2))
self.assertEqual(refined_scores.shape, (1, 0, 2))
def test_refine_keypoints_without_bbox(self):
regressed_keypoints_np = np.array(
[
# Example 0.
[
[[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]], # Instance 0.
[[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]], # Instance 1.
],
], dtype=np.float32)
keypoint_candidates_np = np.array(
[
# Example 0.
[
[[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Candidate 0.
[[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]], # Candidate 1.
[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], # Candidate 2.
],
], dtype=np.float32)
keypoint_scores_np = np.array(
[
# Example 0.
[
[0.8, 0.9, 1.0], # Candidate 0.
[0.6, 0.1, 0.9], # Candidate 1.
                [0.0, 0.0, 0.0], # Candidate 2.
],
], dtype=np.float32)
num_keypoints_candidates_np = np.array(
[
# Example 0.
[2, 2, 2],
], dtype=np.int32)
unmatched_keypoint_score = 0.1
def graph_fn():
regressed_keypoints = tf.constant(
regressed_keypoints_np, dtype=tf.float32)
keypoint_candidates = tf.constant(
keypoint_candidates_np, dtype=tf.float32)
keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32)
num_keypoint_candidates = tf.constant(num_keypoints_candidates_np,
dtype=tf.int32)
(refined_keypoints, refined_scores, _) = cnma.refine_keypoints(
regressed_keypoints,
keypoint_candidates,
keypoint_scores,
num_keypoint_candidates,
bboxes=None,
unmatched_keypoint_score=unmatched_keypoint_score,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance')
return refined_keypoints, refined_scores
refined_keypoints, refined_scores = self.execute(graph_fn, [])
# The expected refined keypoints pick the ones that are closest to the
# regressed keypoint locations without filtering out the candidates which
# are outside of the bounding box.
expected_refined_keypoints = np.array(
[
# Example 0.
[
[[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Instance 0.
[[1.0, 8.0], [0.0, 0.0], [4.0, 7.0]], # Instance 1.
],
], dtype=np.float32)
expected_refined_scores = np.array(
[
# Example 0.
[
[0.8, 0.9, 1.0], # Instance 0.
[0.6, 0.1, 1.0], # Instance 1.
],
], dtype=np.float32)
np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints)
np.testing.assert_allclose(expected_refined_scores, refined_scores)
@parameterized.parameters({'predict_depth': True}, {'predict_depth': False})
def test_refine_keypoints_with_bboxes(self, predict_depth):
regressed_keypoints_np = np.array(
[
# Example 0.
[
[[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]], # Instance 0.
[[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]], # Instance 1.
],
# Example 1.
[
[[6.0, 2.0], [0.0, 0.0], [0.1, 0.1]], # Instance 0.
[[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1.
],
], dtype=np.float32)
keypoint_candidates_np = np.array(
[
# Example 0.
[
[[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Candidate 0.
[[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]], # Candidate 1.
],
# Example 1.
[
[[6.0, 1.5], [5.0, 5.0], [0.0, 0.0]], # Candidate 0.
[[1.0, 4.0], [0.0, 0.3], [0.0, 0.0]], # Candidate 1.
]
], dtype=np.float32)
keypoint_scores_np = np.array(
[
# Example 0.
[
[0.8, 0.9, 1.0], # Candidate 0.
[0.6, 0.1, 0.9], # Candidate 1.
],
# Example 1.
[
[0.7, 0.4, 0.0], # Candidate 0.
[0.6, 0.1, 0.0], # Candidate 1.
]
],
dtype=np.float32)
keypoint_depths_np = np.array(
[
# Example 0.
[
[-0.8, -0.9, -1.0], # Candidate 0.
[-0.6, -0.1, -0.9], # Candidate 1.
],
# Example 1.
[
[-0.7, -0.4, -0.0], # Candidate 0.
[-0.6, -0.1, -0.0], # Candidate 1.
]
],
dtype=np.float32)
num_keypoints_candidates_np = np.array(
[
# Example 0.
[2, 2, 2],
# Example 1.
[2, 2, 0],
], dtype=np.int32)
bboxes_np = np.array(
[
# Example 0.
[
[2.0, 2.0, 14.0, 10.0], # Instance 0.
[0.0, 3.0, 5.0, 7.0], # Instance 1.
],
# Example 1.
[
[0.0, 0.0, 6.0, 2.0], # Instance 0.
[5.0, 1.4, 9.0, 5.0], # Instance 1.
],
], dtype=np.float32)
unmatched_keypoint_score = 0.1
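    # With box_scale=1.0 the search region is exactly the instance box, so
    # candidates falling outside it (e.g. keypoint 1's candidate at (6.0, 10.5)
    # vs. instance 0's box [2, 2, 14, 10] in Example 0) are treated as
    # unmatched and receive unmatched_keypoint_score.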
def graph_fn():
regressed_keypoints = tf.constant(
regressed_keypoints_np, dtype=tf.float32)
keypoint_candidates = tf.constant(
keypoint_candidates_np, dtype=tf.float32)
keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32)
if predict_depth:
keypoint_depths = tf.constant(keypoint_depths_np, dtype=tf.float32)
else:
keypoint_depths = None
num_keypoint_candidates = tf.constant(num_keypoints_candidates_np,
dtype=tf.int32)
bboxes = tf.constant(bboxes_np, dtype=tf.float32)
(refined_keypoints, refined_scores,
refined_depths) = cnma.refine_keypoints(
regressed_keypoints,
keypoint_candidates,
keypoint_scores,
num_keypoint_candidates,
bboxes=bboxes,
unmatched_keypoint_score=unmatched_keypoint_score,
box_scale=1.0,
candidate_search_scale=0.3,
keypoint_depth_candidates=keypoint_depths)
if predict_depth:
return refined_keypoints, refined_scores, refined_depths
else:
return refined_keypoints, refined_scores
expected_refined_keypoints = np.array(
[
# Example 0.
[
[[2.0, 2.5], [6.0, 10.0], [14.0, 7.0]], # Instance 0.
[[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1.
],
# Example 1.
[
[[6.0, 1.5], [0.0, 0.3], [0.1, 0.1]], # Instance 0.
[[6.0, 1.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1.
],
], dtype=np.float32)
expected_refined_scores = np.array(
[
# Example 0.
[
[0.8, unmatched_keypoint_score, # Instance 0.
unmatched_keypoint_score],
[unmatched_keypoint_score, # Instance 1.
unmatched_keypoint_score, 1.0],
],
# Example 1.
[
[0.7, 0.1, unmatched_keypoint_score], # Instance 0.
[0.7, 0.4, unmatched_keypoint_score], # Instance 1.
],
], dtype=np.float32)
if predict_depth:
refined_keypoints, refined_scores, refined_depths = self.execute(
graph_fn, [])
expected_refined_depths = np.array([[[-0.8, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[-0.7, -0.1, 0.0], [-0.7, -0.4,
0.0]]])
np.testing.assert_allclose(expected_refined_depths, refined_depths)
else:
refined_keypoints, refined_scores = self.execute(graph_fn, [])
np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints)
np.testing.assert_allclose(expected_refined_scores, refined_scores)
def test_sdr_scaled_ranking_score(self):
keypoint_scores_np = np.array(
[
# Example 0.
[
[0.9, 0.9, 0.9], # Candidate 0.
[0.9, 0.9, 0.9], # Candidate 1.
]
],
dtype=np.float32)
distances_np = np.expand_dims(
np.array(
[
# Instance 0.
[
[2.0, 1.0, 0.0], # Candidate 0.
[2.0, 1.0, 2.0], # Candidate 1.
],
# Instance 1.
[
[2.0, 1.0, 0.0], # Candidate 0.
[2.0, 1.0, 2.0], # Candidate 1.
]
],
dtype=np.float32),
axis=0)
bboxes_np = np.array(
[
# Example 0.
[
[2.0, 2.0, 20.0, 20.0], # Instance 0 large box.
[3.0, 3.0, 4.0, 4.0], # Instance 1 small box.
],
],
dtype=np.float32)
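    # sdr_scaled_ranking_score combines the heatmap score with a distance-based
    # term whose influence is scaled by the instance box size (the exact
    # formula lives in cnma); the assertions below only check relative
    # orderings, not absolute values.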
keypoint_scores = tf.constant(
keypoint_scores_np, dtype=tf.float32)
distances = tf.constant(
distances_np, dtype=tf.float32)
bboxes = tf.constant(bboxes_np, dtype=tf.float32)
ranking_scores = cnma.sdr_scaled_ranking_score(
keypoint_scores=keypoint_scores,
distances=distances,
bboxes=bboxes,
score_distance_multiplier=0.1)
self.assertAllEqual([1, 2, 2, 3], ranking_scores.shape)
# When the scores are the same, larger distance results in lower ranking
# score.
    # instance 0, candidate 0, keypoint type 0 vs. 1 vs. 2.
self.assertGreater(ranking_scores[0, 0, 0, 2], ranking_scores[0, 0, 0, 1])
self.assertGreater(ranking_scores[0, 0, 0, 1], ranking_scores[0, 0, 0, 0])
    # When the scores are the same and the distance differences are the same,
    # the instance with the larger bbox has a smaller ranking-score difference,
    # i.e. it is less sensitive to distance changes.
# instance 0 vs. 1, candidate 0, keypoint type 0 and 1
self.assertGreater(
ranking_scores[0, 1, 1, 1] - ranking_scores[0, 1, 1, 0],
ranking_scores[0, 0, 1, 1] - ranking_scores[0, 0, 1, 0]
)
def test_gaussian_weighted_score(self):
keypoint_scores_np = np.array(
[
# Example 0.
[
[0.9, 0.9, 0.9], # Candidate 0.
[1.0, 0.8, 1.0], # Candidate 1.
]
],
dtype=np.float32)
distances_np = np.expand_dims(
np.array(
[
# Instance 0.
[
[2.0, 1.0, 0.0], # Candidate 0.
[1.0, 0.0, 2.0], # Candidate 1.
],
# Instance 1.
[
[2.0, 1.0, 0.0], # Candidate 0.
[1.0, 0.0, 2.0], # Candidate 1.
]
],
dtype=np.float32),
axis=0)
bboxes_np = np.array(
[
# Example 0.
[
[2.0, 2.0, 20.0, 20.0], # Instance 0 large box.
[3.0, 3.0, 4.0, 4.0], # Instance 1 small box.
],
],
dtype=np.float32)
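    # gaussian_weighted_score weights each heatmap score by a Gaussian of the
    # candidate distance whose width grows with keypoint_std_dev and box size,
    # so zero distance preserves the score exactly and equal distance/std-dev
    # ratios yield equal weights (both properties are checked below).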
keypoint_scores = tf.constant(
keypoint_scores_np, dtype=tf.float32)
distances = tf.constant(
distances_np, dtype=tf.float32)
bboxes = tf.constant(bboxes_np, dtype=tf.float32)
ranking_scores = cnma.gaussian_weighted_score(
keypoint_scores=keypoint_scores,
distances=distances,
keypoint_std_dev=[1.0, 0.5, 1.5],
bboxes=bboxes)
self.assertAllEqual([1, 2, 2, 3], ranking_scores.shape)
# When distance is zero, the candidate's score remains the same.
# instance 0, candidate 0, keypoint type 2
self.assertAlmostEqual(ranking_scores[0, 0, 0, 2], keypoint_scores[0, 0, 2])
# instance 0, candidate 1, keypoint type 1
self.assertAlmostEqual(ranking_scores[0, 0, 1, 1], keypoint_scores[0, 1, 1])
    # When the distances for two keypoint types are in ratio 1:2, their
    # keypoint standard deviations are also in ratio 1:2, and the heatmap
    # scores are the same, the resulting ranking scores should be equal.
    # instance 0, candidate 0, keypoint types 0 and 1.
self.assertAlmostEqual(
ranking_scores[0, 0, 0, 0], ranking_scores[0, 0, 0, 1])
# When the distances/heatmap scores/keypoint standard deviations are the
# same, the instance with larger bbox size gets higher score.
self.assertGreater(ranking_scores[0, 0, 0, 0], ranking_scores[0, 1, 0, 0])
def test_pad_to_full_keypoint_dim(self):
batch_size = 4
num_instances = 8
num_keypoints = 2
keypoint_inds = [1, 3]
num_total_keypoints = 5
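    # _pad_to_full_keypoint_dim scatters the 2 predicted keypoint types into
    # slots 1 and 3 of a num_total_keypoints-sized axis; the remaining slots
    # are padding.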
kpt_coords_np = np.random.randn(batch_size, num_instances, num_keypoints, 2)
kpt_scores_np = np.random.randn(batch_size, num_instances, num_keypoints)
def graph_fn():
kpt_coords = tf.constant(kpt_coords_np)
kpt_scores = tf.constant(kpt_scores_np)
kpt_coords_padded, kpt_scores_padded = (
cnma._pad_to_full_keypoint_dim(
kpt_coords, kpt_scores, keypoint_inds, num_total_keypoints))
return kpt_coords_padded, kpt_scores_padded
kpt_coords_padded, kpt_scores_padded = self.execute(graph_fn, [])
self.assertAllEqual([batch_size, num_instances, num_total_keypoints, 2],
kpt_coords_padded.shape)
self.assertAllEqual([batch_size, num_instances, num_total_keypoints],
kpt_scores_padded.shape)
for i, kpt_ind in enumerate(keypoint_inds):
np.testing.assert_allclose(kpt_coords_np[:, :, i, :],
kpt_coords_padded[:, :, kpt_ind, :])
np.testing.assert_allclose(kpt_scores_np[:, :, i],
kpt_scores_padded[:, :, kpt_ind])
def test_pad_to_full_instance_dim(self):
batch_size = 4
max_instances = 8
num_keypoints = 6
num_instances = 2
instance_inds = [1, 3]
kpt_coords_np = np.random.randn(batch_size, num_instances, num_keypoints, 2)
kpt_scores_np = np.random.randn(batch_size, num_instances, num_keypoints)
def graph_fn():
kpt_coords = tf.constant(kpt_coords_np)
kpt_scores = tf.constant(kpt_scores_np)
kpt_coords_padded, kpt_scores_padded = (
cnma._pad_to_full_instance_dim(
kpt_coords, kpt_scores, instance_inds, max_instances))
return kpt_coords_padded, kpt_scores_padded
kpt_coords_padded, kpt_scores_padded = self.execute(graph_fn, [])
self.assertAllEqual([batch_size, max_instances, num_keypoints, 2],
kpt_coords_padded.shape)
self.assertAllEqual([batch_size, max_instances, num_keypoints],
kpt_scores_padded.shape)
for i, inst_ind in enumerate(instance_inds):
np.testing.assert_allclose(kpt_coords_np[:, i, :, :],
kpt_coords_padded[:, inst_ind, :, :])
np.testing.assert_allclose(kpt_scores_np[:, i, :],
kpt_scores_padded[:, inst_ind, :])
def test_predicted_embeddings_at_object_centers(self):
batch_size = 2
embedding_size = 5
num_instances = 6
predicted_embedding_feature_map_np = np.random.randn(
batch_size, 10, 10, embedding_size).astype(np.float32)
y_indices = np.random.choice(10, (batch_size, num_instances))
x_indices = np.random.choice(10, (batch_size, num_instances))
def graph_fn():
predicted_embedding_feature_map = tf.constant(
predicted_embedding_feature_map_np, dtype=tf.float32)
gathered_predicted_embeddings = (
cnma.predicted_embeddings_at_object_centers(
predicted_embedding_feature_map,
tf.constant(y_indices, dtype=tf.int32),
tf.constant(x_indices, dtype=tf.int32)))
return gathered_predicted_embeddings
gathered_predicted_embeddings = self.execute(graph_fn, [])
expected_gathered_embeddings_0 = predicted_embedding_feature_map_np[
0, y_indices[0], x_indices[0], :]
expected_gathered_embeddings_1 = predicted_embedding_feature_map_np[
1, y_indices[1], x_indices[1], :]
expected_gathered_embeddings = np.stack([
expected_gathered_embeddings_0,
expected_gathered_embeddings_1], axis=0)
expected_gathered_embeddings = np.reshape(
expected_gathered_embeddings,
[batch_size, num_instances, embedding_size])
np.testing.assert_allclose(expected_gathered_embeddings,
gathered_predicted_embeddings)
# Common parameters for setting up testing examples across tests.
_NUM_CLASSES = 10
_KEYPOINT_INDICES = [0, 1, 2, 3]
_NUM_KEYPOINTS = len(_KEYPOINT_INDICES)
_DENSEPOSE_NUM_PARTS = 24
_TASK_NAME = 'human_pose'
_NUM_TRACK_IDS = 3
_REID_EMBED_SIZE = 2
_NUM_FC_LAYERS = 1
def get_fake_center_params(max_box_predictions=5):
"""Returns the fake object center parameter namedtuple."""
return cnma.ObjectCenterParams(
classification_loss=losses.WeightedSigmoidClassificationLoss(),
object_center_loss_weight=1.0,
min_box_overlap_iou=1.0,
max_box_predictions=max_box_predictions,
use_labeled_classes=False,
center_head_num_filters=[128],
center_head_kernel_sizes=[5])
def get_fake_od_params():
"""Returns the fake object detection parameter namedtuple."""
return cnma.ObjectDetectionParams(
localization_loss=losses.L1LocalizationLoss(),
offset_loss_weight=1.0,
scale_loss_weight=0.1)
def get_fake_kp_params(num_candidates_per_keypoint=100,
per_keypoint_offset=False,
predict_depth=False,
per_keypoint_depth=False,
peak_radius=0,
candidate_ranking_mode='min_distance',
argmax_postprocessing=False,
rescore_instances=False):
"""Returns the fake keypoint estimation parameter namedtuple."""
return cnma.KeypointEstimationParams(
task_name=_TASK_NAME,
class_id=1,
keypoint_indices=_KEYPOINT_INDICES,
keypoint_std_dev=[0.00001] * len(_KEYPOINT_INDICES),
classification_loss=losses.WeightedSigmoidClassificationLoss(),
localization_loss=losses.L1LocalizationLoss(),
unmatched_keypoint_score=0.1,
keypoint_candidate_score_threshold=0.1,
num_candidates_per_keypoint=num_candidates_per_keypoint,
per_keypoint_offset=per_keypoint_offset,
predict_depth=predict_depth,
per_keypoint_depth=per_keypoint_depth,
offset_peak_radius=peak_radius,
candidate_ranking_mode=candidate_ranking_mode,
argmax_postprocessing=argmax_postprocessing,
rescore_instances=rescore_instances,
rescoring_threshold=0.5)
def get_fake_mask_params():
"""Returns the fake mask estimation parameter namedtuple."""
return cnma.MaskParams(
classification_loss=losses.WeightedSoftmaxClassificationLoss(),
task_loss_weight=1.0,
mask_height=4,
mask_width=4,
mask_head_num_filters=[96],
mask_head_kernel_sizes=[3])
def get_fake_densepose_params():
"""Returns the fake DensePose estimation parameter namedtuple."""
return cnma.DensePoseParams(
class_id=1,
classification_loss=losses.WeightedSoftmaxClassificationLoss(),
localization_loss=losses.L1LocalizationLoss(),
part_loss_weight=1.0,
coordinate_loss_weight=1.0,
num_parts=_DENSEPOSE_NUM_PARTS,
task_loss_weight=1.0,
upsample_to_input_res=True,
upsample_method='nearest')
def get_fake_track_params():
"""Returns the fake object tracking parameter namedtuple."""
return cnma.TrackParams(
num_track_ids=_NUM_TRACK_IDS,
reid_embed_size=_REID_EMBED_SIZE,
num_fc_layers=_NUM_FC_LAYERS,
classification_loss=losses.WeightedSoftmaxClassificationLoss(),
task_loss_weight=1.0)
def get_fake_temporal_offset_params():
"""Returns the fake temporal offset parameter namedtuple."""
return cnma.TemporalOffsetParams(
localization_loss=losses.WeightedSmoothL1LocalizationLoss(),
task_loss_weight=1.0)
def build_center_net_meta_arch(build_resnet=False,
num_classes=_NUM_CLASSES,
max_box_predictions=5,
apply_non_max_suppression=False,
detection_only=False,
per_keypoint_offset=False,
predict_depth=False,
per_keypoint_depth=False,
peak_radius=0,
keypoint_only=False,
candidate_ranking_mode='min_distance',
argmax_postprocessing=False,
rescore_instances=False):
"""Builds the CenterNet meta architecture."""
if build_resnet:
feature_extractor = (
center_net_resnet_feature_extractor.CenterNetResnetFeatureExtractor(
'resnet_v2_101'))
else:
feature_extractor = DummyFeatureExtractor(
channel_means=(1.0, 2.0, 3.0),
channel_stds=(10., 20., 30.),
bgr_ordering=False,
num_feature_outputs=2,
stride=4)
image_resizer_fn = functools.partial(
preprocessor.resize_to_range,
min_dimension=128,
max_dimension=128,
      pad_to_max_dimension=True)
non_max_suppression_fn = None
if apply_non_max_suppression:
post_processing_proto = post_processing_pb2.PostProcessing()
post_processing_proto.batch_non_max_suppression.iou_threshold = 0.6
post_processing_proto.batch_non_max_suppression.score_threshold = 0.6
(post_processing_proto.batch_non_max_suppression.max_total_detections
) = max_box_predictions
(post_processing_proto.batch_non_max_suppression.max_detections_per_class
) = max_box_predictions
(post_processing_proto.batch_non_max_suppression.change_coordinate_frame
) = False
non_max_suppression_fn, _ = post_processing_builder.build(
post_processing_proto)
if keypoint_only:
num_candidates_per_keypoint = 100 if max_box_predictions > 1 else 1
return cnma.CenterNetMetaArch(
is_training=True,
add_summaries=False,
num_classes=num_classes,
feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=get_fake_center_params(max_box_predictions),
keypoint_params_dict={
_TASK_NAME:
get_fake_kp_params(num_candidates_per_keypoint,
per_keypoint_offset, predict_depth,
per_keypoint_depth, peak_radius,
candidate_ranking_mode,
argmax_postprocessing, rescore_instances)
},
non_max_suppression_fn=non_max_suppression_fn)
elif detection_only:
return cnma.CenterNetMetaArch(
is_training=True,
add_summaries=False,
num_classes=num_classes,
feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=get_fake_center_params(max_box_predictions),
object_detection_params=get_fake_od_params(),
non_max_suppression_fn=non_max_suppression_fn)
elif num_classes == 1:
num_candidates_per_keypoint = 100 if max_box_predictions > 1 else 1
return cnma.CenterNetMetaArch(
is_training=True,
add_summaries=False,
num_classes=num_classes,
feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=get_fake_center_params(max_box_predictions),
object_detection_params=get_fake_od_params(),
keypoint_params_dict={
_TASK_NAME:
get_fake_kp_params(num_candidates_per_keypoint,
per_keypoint_offset, predict_depth,
per_keypoint_depth, peak_radius,
candidate_ranking_mode,
argmax_postprocessing, rescore_instances)
},
non_max_suppression_fn=non_max_suppression_fn)
else:
return cnma.CenterNetMetaArch(
is_training=True,
add_summaries=False,
num_classes=num_classes,
feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=get_fake_center_params(),
object_detection_params=get_fake_od_params(),
keypoint_params_dict={_TASK_NAME: get_fake_kp_params(
candidate_ranking_mode=candidate_ranking_mode)},
mask_params=get_fake_mask_params(),
densepose_params=get_fake_densepose_params(),
track_params=get_fake_track_params(),
temporal_offset_params=get_fake_temporal_offset_params(),
non_max_suppression_fn=non_max_suppression_fn)
def _logit(p):
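  """Computes the logit (inverse sigmoid) of p, with eps for stability."""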
return np.log(
(p + np.finfo(np.float32).eps) / (1 - p + np.finfo(np.float32).eps))
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArchLibTest(test_case.TestCase):
"""Test for CenterNet meta architecture related functions."""
def test_get_keypoint_name(self):
self.assertEqual('human_pose/keypoint_offset',
cnma.get_keypoint_name('human_pose', 'keypoint_offset'))
def test_get_num_instances_from_weights(self):
weight1 = tf.constant([0.0, 0.0, 0.0], dtype=tf.float32)
weight2 = tf.constant([0.5, 0.9, 0.0], dtype=tf.float32)
weight3 = tf.constant([0.0, 0.0, 1.0], dtype=tf.float32)
def graph_fn_1():
# Total of three elements with non-zero values.
num_instances = cnma.get_num_instances_from_weights(
[weight1, weight2, weight3])
return num_instances
num_instances = self.execute(graph_fn_1, [])
self.assertAlmostEqual(3, num_instances)
# No non-zero value in the weights. Return minimum value: 1.
def graph_fn_2():
      # All weights are zero, so the minimum value of 1 is returned.
num_instances = cnma.get_num_instances_from_weights([weight1, weight1])
return num_instances
num_instances = self.execute(graph_fn_2, [])
self.assertAlmostEqual(1, num_instances)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArchTest(test_case.TestCase, parameterized.TestCase):
"""Tests for the CenterNet meta architecture."""
def test_construct_prediction_heads(self):
model = build_center_net_meta_arch()
fake_feature_map = np.zeros((4, 128, 128, 8))
# Check the dictionary contains expected keys and corresponding heads with
# correct dimensions.
# "object center" head:
output = model._prediction_head_dict[cnma.OBJECT_CENTER][-1](
fake_feature_map)
self.assertEqual((4, 128, 128, _NUM_CLASSES), output.shape)
# "object scale" (height/width) head:
output = model._prediction_head_dict[cnma.BOX_SCALE][-1](fake_feature_map)
self.assertEqual((4, 128, 128, 2), output.shape)
# "object offset" head:
output = model._prediction_head_dict[cnma.BOX_OFFSET][-1](fake_feature_map)
self.assertEqual((4, 128, 128, 2), output.shape)
# "keypoint offset" head:
output = model._prediction_head_dict[
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET)][-1](
fake_feature_map)
self.assertEqual((4, 128, 128, 2), output.shape)
# "keypoint heatmap" head:
output = model._prediction_head_dict[cnma.get_keypoint_name(
_TASK_NAME, cnma.KEYPOINT_HEATMAP)][-1](
fake_feature_map)
self.assertEqual((4, 128, 128, _NUM_KEYPOINTS), output.shape)
# "keypoint regression" head:
output = model._prediction_head_dict[cnma.get_keypoint_name(
_TASK_NAME, cnma.KEYPOINT_REGRESSION)][-1](
fake_feature_map)
self.assertEqual((4, 128, 128, 2 * _NUM_KEYPOINTS), output.shape)
# "mask" head:
output = model._prediction_head_dict[cnma.SEGMENTATION_HEATMAP][-1](
fake_feature_map)
self.assertEqual((4, 128, 128, _NUM_CLASSES), output.shape)
# "densepose parts" head:
output = model._prediction_head_dict[cnma.DENSEPOSE_HEATMAP][-1](
fake_feature_map)
self.assertEqual((4, 128, 128, _DENSEPOSE_NUM_PARTS), output.shape)
# "densepose surface coordinates" head:
output = model._prediction_head_dict[cnma.DENSEPOSE_REGRESSION][-1](
fake_feature_map)
self.assertEqual((4, 128, 128, 2 * _DENSEPOSE_NUM_PARTS), output.shape)
# "track embedding" head:
output = model._prediction_head_dict[cnma.TRACK_REID][-1](
fake_feature_map)
self.assertEqual((4, 128, 128, _REID_EMBED_SIZE), output.shape)
# "temporal offset" head:
output = model._prediction_head_dict[cnma.TEMPORAL_OFFSET][-1](
fake_feature_map)
self.assertEqual((4, 128, 128, 2), output.shape)
def test_initialize_target_assigners(self):
model = build_center_net_meta_arch()
assigner_dict = model._initialize_target_assigners(
stride=2,
min_box_overlap_iou=0.7)
    # Check whether the corresponding target assigner class is initialized.
# object center target assigner:
self.assertIsInstance(assigner_dict[cnma.OBJECT_CENTER],
cn_assigner.CenterNetCenterHeatmapTargetAssigner)
# object detection target assigner:
self.assertIsInstance(assigner_dict[cnma.DETECTION_TASK],
cn_assigner.CenterNetBoxTargetAssigner)
# keypoint estimation target assigner:
self.assertIsInstance(assigner_dict[_TASK_NAME],
cn_assigner.CenterNetKeypointTargetAssigner)
# mask estimation target assigner:
self.assertIsInstance(assigner_dict[cnma.SEGMENTATION_TASK],
cn_assigner.CenterNetMaskTargetAssigner)
# DensePose estimation target assigner:
self.assertIsInstance(assigner_dict[cnma.DENSEPOSE_TASK],
cn_assigner.CenterNetDensePoseTargetAssigner)
# Track estimation target assigner:
self.assertIsInstance(assigner_dict[cnma.TRACK_TASK],
cn_assigner.CenterNetTrackTargetAssigner)
# Temporal Offset target assigner:
self.assertIsInstance(assigner_dict[cnma.TEMPORALOFFSET_TASK],
cn_assigner.CenterNetTemporalOffsetTargetAssigner)
def test_predict(self):
"""Test the predict function."""
model = build_center_net_meta_arch()
def graph_fn():
prediction_dict = model.predict(tf.zeros([2, 128, 128, 3]), None)
return prediction_dict
prediction_dict = self.execute(graph_fn, [])
self.assertEqual(prediction_dict['preprocessed_inputs'].shape,
(2, 128, 128, 3))
self.assertEqual(prediction_dict[cnma.OBJECT_CENTER][0].shape,
(2, 32, 32, _NUM_CLASSES))
self.assertEqual(prediction_dict[cnma.BOX_SCALE][0].shape,
(2, 32, 32, 2))
self.assertEqual(prediction_dict[cnma.BOX_OFFSET][0].shape,
(2, 32, 32, 2))
self.assertEqual(prediction_dict[cnma.SEGMENTATION_HEATMAP][0].shape,
(2, 32, 32, _NUM_CLASSES))
self.assertEqual(prediction_dict[cnma.DENSEPOSE_HEATMAP][0].shape,
(2, 32, 32, _DENSEPOSE_NUM_PARTS))
self.assertEqual(prediction_dict[cnma.DENSEPOSE_REGRESSION][0].shape,
(2, 32, 32, 2 * _DENSEPOSE_NUM_PARTS))
self.assertEqual(prediction_dict[cnma.TRACK_REID][0].shape,
(2, 32, 32, _REID_EMBED_SIZE))
self.assertEqual(prediction_dict[cnma.TEMPORAL_OFFSET][0].shape,
(2, 32, 32, 2))
def test_loss(self):
"""Test the loss function."""
groundtruth_dict = get_fake_groundtruth_dict(16, 32, 4)
model = build_center_net_meta_arch()
model.provide_groundtruth(
groundtruth_boxes_list=groundtruth_dict[fields.BoxListFields.boxes],
groundtruth_weights_list=groundtruth_dict[fields.BoxListFields.weights],
groundtruth_classes_list=groundtruth_dict[fields.BoxListFields.classes],
groundtruth_keypoints_list=groundtruth_dict[
fields.BoxListFields.keypoints],
groundtruth_masks_list=groundtruth_dict[
fields.BoxListFields.masks],
groundtruth_dp_num_points_list=groundtruth_dict[
fields.BoxListFields.densepose_num_points],
groundtruth_dp_part_ids_list=groundtruth_dict[
fields.BoxListFields.densepose_part_ids],
groundtruth_dp_surface_coords_list=groundtruth_dict[
fields.BoxListFields.densepose_surface_coords],
groundtruth_track_ids_list=groundtruth_dict[
fields.BoxListFields.track_ids],
groundtruth_track_match_flags_list=groundtruth_dict[
fields.BoxListFields.track_match_flags],
groundtruth_temporal_offsets_list=groundtruth_dict[
fields.BoxListFields.temporal_offsets])
kernel_initializer = tf.constant_initializer(
[[1, 1, 0], [-1000000, -1000000, 1000000]])
model.track_reid_classification_net = tf.keras.layers.Dense(
_NUM_TRACK_IDS,
kernel_initializer=kernel_initializer,
input_shape=(_REID_EMBED_SIZE,))
prediction_dict = get_fake_prediction_dict(
input_height=16, input_width=32, stride=4)
def graph_fn():
loss_dict = model.loss(prediction_dict,
tf.constant([[16, 24, 3], [16, 24, 3]]))
return loss_dict
loss_dict = self.execute(graph_fn, [])
# The prediction and groundtruth are curated to produce very low loss.
self.assertGreater(
0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER)])
self.assertGreater(
0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE)])
self.assertGreater(
0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET)])
self.assertGreater(
0.01,
loss_dict['%s/%s' %
(cnma.LOSS_KEY_PREFIX,
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP))])
self.assertGreater(
0.01,
loss_dict['%s/%s' %
(cnma.LOSS_KEY_PREFIX,
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET))])
self.assertGreater(
0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
cnma.get_keypoint_name(
_TASK_NAME, cnma.KEYPOINT_REGRESSION))])
self.assertGreater(
0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
cnma.SEGMENTATION_HEATMAP)])
self.assertGreater(
0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
cnma.DENSEPOSE_HEATMAP)])
self.assertGreater(
0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
cnma.DENSEPOSE_REGRESSION)])
self.assertGreater(
0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
cnma.TRACK_REID)])
self.assertGreater(
0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
cnma.TEMPORAL_OFFSET)])
@parameterized.parameters(
{'target_class_id': 1, 'with_true_image_shape': True},
{'target_class_id': 2, 'with_true_image_shape': True},
{'target_class_id': 1, 'with_true_image_shape': False},
)
def test_postprocess(self, target_class_id, with_true_image_shape):
"""Test the postprocess function."""
model = build_center_net_meta_arch()
max_detection = model._center_params.max_box_predictions
num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)
unmatched_keypoint_score = (
model._kp_params_dict[_TASK_NAME].unmatched_keypoint_score)
class_center = np.zeros((1, 32, 32, 10), dtype=np.float32)
height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
offset = np.zeros((1, 32, 32, 2), dtype=np.float32)
keypoint_heatmaps = np.ones(
(1, 32, 32, num_keypoints), dtype=np.float32) * _logit(0.001)
keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32)
keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)
class_probs = np.ones(10) * _logit(0.25)
class_probs[target_class_id] = _logit(0.75)
class_center[0, 16, 16] = class_probs
height_width[0, 16, 16] = [5, 10]
offset[0, 16, 16] = [.25, .5]
keypoint_regression[0, 16, 16] = [
-1., -1.,
-1., 1.,
1., -1.,
1., 1.]
keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score.
segmentation_heatmap = np.zeros((1, 32, 32, 10), dtype=np.float32)
segmentation_heatmap[:, 14:18, 14:18, target_class_id] = 1.0
segmentation_heatmap = _logit(segmentation_heatmap)
dp_part_ind = 4
dp_part_heatmap = np.zeros((1, 32, 32, _DENSEPOSE_NUM_PARTS),
dtype=np.float32)
dp_part_heatmap[0, 14:18, 14:18, dp_part_ind] = 1.0
dp_part_heatmap = _logit(dp_part_heatmap)
dp_surf_coords = np.random.randn(1, 32, 32, 2 * _DENSEPOSE_NUM_PARTS)
embedding_size = 100
track_reid_embedding = np.zeros((1, 32, 32, embedding_size),
dtype=np.float32)
track_reid_embedding[0, 16, 16, :] = np.ones(embedding_size)
temporal_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32)
temporal_offsets[..., 1] = 1
class_center = tf.constant(class_center)
height_width = tf.constant(height_width)
offset = tf.constant(offset)
keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)
segmentation_heatmap = tf.constant(segmentation_heatmap, dtype=tf.float32)
dp_part_heatmap = tf.constant(dp_part_heatmap, dtype=tf.float32)
dp_surf_coords = tf.constant(dp_surf_coords, dtype=tf.float32)
track_reid_embedding = tf.constant(track_reid_embedding, dtype=tf.float32)
temporal_offsets = tf.constant(temporal_offsets, dtype=tf.float32)
prediction_dict = {
cnma.OBJECT_CENTER: [class_center],
cnma.BOX_SCALE: [height_width],
cnma.BOX_OFFSET: [offset],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
[keypoint_heatmaps],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
[keypoint_offsets],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
[keypoint_regression],
cnma.SEGMENTATION_HEATMAP: [segmentation_heatmap],
cnma.DENSEPOSE_HEATMAP: [dp_part_heatmap],
cnma.DENSEPOSE_REGRESSION: [dp_surf_coords],
cnma.TRACK_REID: [track_reid_embedding],
cnma.TEMPORAL_OFFSET: [temporal_offsets],
}
def graph_fn():
if with_true_image_shape:
detections = model.postprocess(prediction_dict,
tf.constant([[128, 128, 3]]))
else:
detections = model.postprocess(prediction_dict, None)
return detections
detections = self.execute_cpu(graph_fn, [])
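    # With stride 4, the center peak at feature cell (16, 16) plus offset
    # (0.25, 0.5) and height/width (5, 10) gives the box
    # [(16.25 - 2.5) * 4, (16.5 - 5) * 4, (16.25 + 2.5) * 4, (16.5 + 5) * 4]
    # = [55, 46, 75, 86] in pixels, normalized by the 128 input size.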
self.assertAllClose(detections['detection_boxes'][0, 0],
np.array([55, 46, 75, 86]) / 128.0)
self.assertAllClose(detections['detection_scores'][0],
[.75, .5, .5, .5, .5])
expected_multiclass_scores = [.25] * 10
expected_multiclass_scores[target_class_id] = .75
self.assertAllClose(expected_multiclass_scores,
detections['detection_multiclass_scores'][0][0])
    # The output embeddings extracted at the object centers form a 3-D array of
    # shape [batch, num_boxes, embedding_size]. The only valid prediction is
    # the first embedding of the first example: a 1-D array of all ones with
    # shape [embedding_size]. After L2 normalization, each of its values equals
    # 1 / sqrt(embedding_size).
self.assertAllClose(detections['detection_embeddings'][0, 0],
np.ones(embedding_size) / embedding_size**0.5)
self.assertEqual(detections['detection_classes'][0, 0], target_class_id)
self.assertEqual(detections['num_detections'], [5])
self.assertAllEqual([1, max_detection, num_keypoints, 2],
detections['detection_keypoints'].shape)
self.assertAllEqual([1, max_detection, num_keypoints],
detections['detection_keypoint_scores'].shape)
self.assertAllEqual([1, max_detection, 4, 4],
detections['detection_masks'].shape)
self.assertAllEqual([1, max_detection, embedding_size],
detections['detection_embeddings'].shape)
self.assertAllEqual([1, max_detection, 2],
detections['detection_temporal_offsets'].shape)
# Masks should be empty for everything but the first detection.
self.assertAllEqual(
detections['detection_masks'][0, 1:, :, :],
np.zeros_like(detections['detection_masks'][0, 1:, :, :]))
self.assertAllEqual(
detections['detection_surface_coords'][0, 1:, :, :],
np.zeros_like(detections['detection_surface_coords'][0, 1:, :, :]))
if target_class_id == 1:
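      # Keypoints 0-2 snap to their high-scoring heatmap peaks at (14, 14),
      # (14, 18) and (18, 14). Keypoint 3 has no strong peak, so it falls back
      # to its regressed location (16 + 1, 16 + 1) = (17, 17) and receives
      # unmatched_keypoint_score.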
expected_kpts_for_obj_0 = np.array(
[[14., 14.], [14., 18.], [18., 14.], [17., 17.]]) / 32.
expected_kpt_scores_for_obj_0 = np.array(
[0.9, 0.9, 0.9, unmatched_keypoint_score])
np.testing.assert_allclose(detections['detection_keypoints'][0][0],
expected_kpts_for_obj_0, rtol=1e-6)
np.testing.assert_allclose(detections['detection_keypoint_scores'][0][0],
expected_kpt_scores_for_obj_0, rtol=1e-6)
# First detection has DensePose parts.
self.assertSameElements(
np.unique(detections['detection_masks'][0, 0, :, :]),
set([0, dp_part_ind + 1]))
self.assertGreater(np.sum(np.abs(detections['detection_surface_coords'])),
0.0)
else:
# All keypoint outputs should be zeros.
np.testing.assert_allclose(
detections['detection_keypoints'][0][0],
np.zeros([num_keypoints, 2], float),
rtol=1e-6)
np.testing.assert_allclose(
detections['detection_keypoint_scores'][0][0],
np.zeros([num_keypoints], float),
rtol=1e-6)
# Binary segmentation mask.
self.assertSameElements(
np.unique(detections['detection_masks'][0, 0, :, :]),
set([0, 1]))
# No DensePose surface coordinates.
np.testing.assert_allclose(
detections['detection_surface_coords'][0, 0, :, :],
np.zeros_like(detections['detection_surface_coords'][0, 0, :, :]))
def test_postprocess_kpts_no_od(self):
"""Test the postprocess function."""
target_class_id = 1
model = build_center_net_meta_arch(keypoint_only=True)
max_detection = model._center_params.max_box_predictions
num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)
class_center = np.zeros((1, 32, 32, 10), dtype=np.float32)
keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32)
keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32)
keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)
class_probs = np.ones(10) * _logit(0.25)
class_probs[target_class_id] = _logit(0.75)
class_center[0, 16, 16] = class_probs
keypoint_regression[0, 16, 16] = [
-1., -1.,
-1., 1.,
1., -1.,
1., 1.]
keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score.
class_center = tf.constant(class_center)
keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)
prediction_dict = {
cnma.OBJECT_CENTER: [class_center],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
[keypoint_heatmaps],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
[keypoint_offsets],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
[keypoint_regression],
}
detections = model.postprocess(prediction_dict,
tf.constant([[128, 128, 3]]))
self.assertAllClose(detections['detection_scores'][0],
[.75, .5, .5, .5, .5])
expected_multiclass_scores = [.25] * 10
expected_multiclass_scores[target_class_id] = .75
self.assertAllClose(expected_multiclass_scores,
detections['detection_multiclass_scores'][0][0])
self.assertEqual(detections['detection_classes'][0, 0], target_class_id)
self.assertEqual(detections['num_detections'], [5])
self.assertAllEqual([1, max_detection, num_keypoints, 2],
detections['detection_keypoints'].shape)
self.assertAllEqual([1, max_detection, num_keypoints],
detections['detection_keypoint_scores'].shape)
def test_non_max_suppression(self):
"""Tests application of NMS on CenterNet detections."""
target_class_id = 1
model = build_center_net_meta_arch(apply_non_max_suppression=True,
detection_only=True)
class_center = np.zeros((1, 32, 32, 10), dtype=np.float32)
height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
offset = np.zeros((1, 32, 32, 2), dtype=np.float32)
class_probs = np.ones(10) * _logit(0.25)
class_probs[target_class_id] = _logit(0.75)
class_center[0, 16, 16] = class_probs
height_width[0, 16, 16] = [5, 10]
offset[0, 16, 16] = [.25, .5]
class_center = tf.constant(class_center)
height_width = tf.constant(height_width)
offset = tf.constant(offset)
prediction_dict = {
cnma.OBJECT_CENTER: [class_center],
cnma.BOX_SCALE: [height_width],
cnma.BOX_OFFSET: [offset],
}
def graph_fn():
detections = model.postprocess(prediction_dict,
tf.constant([[128, 128, 3]]))
return detections
detections = self.execute_cpu(graph_fn, [])
num_detections = int(detections['num_detections'])
self.assertEqual(num_detections, 1)
self.assertAllClose(detections['detection_boxes'][0, 0],
np.array([55, 46, 75, 86]) / 128.0)
self.assertAllClose(detections['detection_scores'][0][:num_detections],
[.75])
expected_multiclass_scores = [.25] * 10
expected_multiclass_scores[target_class_id] = .75
self.assertAllClose(expected_multiclass_scores,
detections['detection_multiclass_scores'][0][0])
def test_non_max_suppression_with_kpts_rescoring(self):
"""Tests application of NMS on CenterNet detections and keypoints."""
model = build_center_net_meta_arch(
num_classes=1, max_box_predictions=5, per_keypoint_offset=True,
candidate_ranking_mode='min_distance',
argmax_postprocessing=False, apply_non_max_suppression=True,
rescore_instances=True)
num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)
class_center = np.zeros((1, 32, 32, 2), dtype=np.float32)
height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
offset = np.zeros((1, 32, 32, 2), dtype=np.float32)
keypoint_heatmaps = np.ones(
(1, 32, 32, num_keypoints), dtype=np.float32) * _logit(0.01)
keypoint_offsets = np.zeros(
(1, 32, 32, num_keypoints * 2), dtype=np.float32)
keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)
class_probs = np.zeros(2)
class_probs[1] = _logit(0.75)
class_center[0, 16, 16] = class_probs
height_width[0, 16, 16] = [5, 10]
offset[0, 16, 16] = [.25, .5]
class_center[0, 16, 17] = class_probs
height_width[0, 16, 17] = [5, 10]
offset[0, 16, 17] = [.25, .5]
keypoint_regression[0, 16, 16] = [
-1., -1.,
-1., 1.,
1., -1.,
1., 1.]
keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score.
class_center = tf.constant(class_center)
height_width = tf.constant(height_width)
offset = tf.constant(offset)
keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)
prediction_dict = {
cnma.OBJECT_CENTER: [class_center],
cnma.BOX_SCALE: [height_width],
cnma.BOX_OFFSET: [offset],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
[keypoint_heatmaps],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
[keypoint_offsets],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
[keypoint_regression],
}
def graph_fn():
detections = model.postprocess(prediction_dict,
tf.constant([[128, 128, 3]]))
return detections
detections = self.execute_cpu(graph_fn, [])
num_detections = int(detections['num_detections'])
    # One of the boxes is filtered out by NMS.
self.assertEqual(num_detections, 1)
# The keypoint scores are [0.9, 0.9, 0.9, 0.1] and the resulting rescored
# score is 0.9 * 3 / 4 = 0.675.
self.assertAllClose(detections['detection_scores'][0][:num_detections],
[0.675])
@parameterized.parameters(
{
'candidate_ranking_mode': 'min_distance',
'argmax_postprocessing': False
},
{
'candidate_ranking_mode': 'gaussian_weighted_const',
'argmax_postprocessing': True
})
def test_postprocess_single_class(self, candidate_ranking_mode,
argmax_postprocessing):
"""Test the postprocess function."""
model = build_center_net_meta_arch(
num_classes=1, max_box_predictions=5, per_keypoint_offset=True,
candidate_ranking_mode=candidate_ranking_mode,
argmax_postprocessing=argmax_postprocessing)
max_detection = model._center_params.max_box_predictions
num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)
class_center = np.zeros((1, 32, 32, 1), dtype=np.float32)
height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
offset = np.zeros((1, 32, 32, 2), dtype=np.float32)
keypoint_heatmaps = np.ones(
(1, 32, 32, num_keypoints), dtype=np.float32) * _logit(0.01)
keypoint_offsets = np.zeros(
(1, 32, 32, num_keypoints * 2), dtype=np.float32)
keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)
class_probs = np.zeros(1)
class_probs[0] = _logit(0.75)
class_center[0, 16, 16] = class_probs
height_width[0, 16, 16] = [5, 10]
offset[0, 16, 16] = [.25, .5]
keypoint_regression[0, 16, 16] = [
-1., -1.,
-1., 1.,
1., -1.,
1., 1.]
keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score.
class_center = tf.constant(class_center)
height_width = tf.constant(height_width)
offset = tf.constant(offset)
keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)
prediction_dict = {
cnma.OBJECT_CENTER: [class_center],
cnma.BOX_SCALE: [height_width],
cnma.BOX_OFFSET: [offset],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
[keypoint_heatmaps],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
[keypoint_offsets],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
[keypoint_regression],
}
def graph_fn():
detections = model.postprocess(prediction_dict,
tf.constant([[128, 128, 3]]))
return detections
detections = self.execute_cpu(graph_fn, [])
self.assertAllClose(detections['detection_boxes'][0, 0],
np.array([55, 46, 75, 86]) / 128.0)
self.assertAllClose(detections['detection_scores'][0],
[.75, .5, .5, .5, .5])
self.assertEqual(detections['detection_classes'][0, 0], 0)
self.assertEqual(detections['num_detections'], [5])
self.assertAllEqual([1, max_detection, num_keypoints, 2],
detections['detection_keypoints'].shape)
self.assertAllClose(
[[0.4375, 0.4375], [0.4375, 0.5625], [0.5625, 0.4375]],
detections['detection_keypoints'][0, 0, 0:3, :])
self.assertAllEqual([1, max_detection, num_keypoints],
detections['detection_keypoint_scores'].shape)
def test_postprocess_single_instance(self):
"""Test the postprocess single instance function."""
model = build_center_net_meta_arch(
num_classes=1, candidate_ranking_mode='score_distance_ratio')
num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)
class_center = np.zeros((1, 32, 32, 1), dtype=np.float32)
keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32)
keypoint_offsets = np.zeros(
(1, 32, 32, num_keypoints * 2), dtype=np.float32)
keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)
class_probs = np.zeros(1)
class_probs[0] = _logit(0.75)
class_center[0, 16, 16] = class_probs
keypoint_regression[0, 16, 16] = [
-1., -1.,
-1., 1.,
1., -1.,
1., 1.]
keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score.
class_center = tf.constant(class_center)
keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)
prediction_dict = {
cnma.OBJECT_CENTER: [class_center],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
[keypoint_heatmaps],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
[keypoint_offsets],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
[keypoint_regression],
}
def graph_fn():
detections = model.postprocess_single_instance_keypoints(
prediction_dict,
tf.constant([[128, 128, 3]]))
return detections
detections = self.execute_cpu(graph_fn, [])
self.assertAllEqual([1, 1, num_keypoints, 2],
detections['detection_keypoints'].shape)
self.assertAllEqual([1, 1, num_keypoints],
detections['detection_keypoint_scores'].shape)
@parameterized.parameters(
{'per_keypoint_depth': False},
{'per_keypoint_depth': True},
)
def test_postprocess_single_class_depth(self, per_keypoint_depth):
"""Test the postprocess function."""
model = build_center_net_meta_arch(
num_classes=1,
per_keypoint_offset=per_keypoint_depth,
predict_depth=True,
per_keypoint_depth=per_keypoint_depth)
num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)
class_center = np.zeros((1, 32, 32, 1), dtype=np.float32)
height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
offset = np.zeros((1, 32, 32, 2), dtype=np.float32)
keypoint_heatmaps = np.ones(
(1, 32, 32, num_keypoints), dtype=np.float32) * _logit(0.001)
keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32)
keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)
class_probs = np.zeros(1)
class_probs[0] = _logit(0.75)
class_center[0, 16, 16] = class_probs
height_width[0, 16, 16] = [5, 10]
offset[0, 16, 16] = [.25, .5]
keypoint_regression[0, 16, 16] = [-1., -1., -1., 1., 1., -1., 1., 1.]
keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score.
if per_keypoint_depth:
keypoint_depth = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32)
keypoint_depth[0, 14, 14, 0] = -1.0
keypoint_depth[0, 14, 18, 1] = -1.1
keypoint_depth[0, 18, 14, 2] = -1.2
keypoint_depth[0, 18, 18, 3] = -1.3
else:
keypoint_depth = np.zeros((1, 32, 32, 1), dtype=np.float32)
keypoint_depth[0, 14, 14, 0] = -1.0
keypoint_depth[0, 14, 18, 0] = -1.1
keypoint_depth[0, 18, 14, 0] = -1.2
keypoint_depth[0, 18, 18, 0] = -1.3
class_center = tf.constant(class_center)
height_width = tf.constant(height_width)
offset = tf.constant(offset)
keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)
keypoint_depth = tf.constant(keypoint_depth, dtype=tf.float32)
prediction_dict = {
cnma.OBJECT_CENTER: [class_center],
cnma.BOX_SCALE: [height_width],
cnma.BOX_OFFSET: [offset],
cnma.get_keypoint_name(_TASK_NAME,
cnma.KEYPOINT_HEATMAP): [keypoint_heatmaps],
cnma.get_keypoint_name(_TASK_NAME,
cnma.KEYPOINT_OFFSET): [keypoint_offsets],
cnma.get_keypoint_name(_TASK_NAME,
cnma.KEYPOINT_REGRESSION): [keypoint_regression],
cnma.get_keypoint_name(_TASK_NAME,
cnma.KEYPOINT_DEPTH): [keypoint_depth]
}
def graph_fn():
detections = model.postprocess(prediction_dict,
tf.constant([[128, 128, 3]]))
return detections
detections = self.execute_cpu(graph_fn, [])
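    # Keypoints 0-2 match heatmap peaks and pick up the depths set above;
    # keypoint 3 is unmatched (low heatmap score), so its depth defaults to 0.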
self.assertAllClose(detections['detection_keypoint_depths'][0, 0],
np.array([-1.0, -1.1, -1.2, 0.0]))
self.assertAllClose(detections['detection_keypoint_scores'][0, 0],
np.array([0.9, 0.9, 0.9, 0.1]))
def test_mask_object_center_in_postprocess_by_true_image_shape(self):
"""Test the postprocess function is masked by true_image_shape."""
model = build_center_net_meta_arch(num_classes=1)
max_detection = model._center_params.max_box_predictions
num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)
class_center = np.zeros((1, 32, 32, 1), dtype=np.float32)
height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
offset = np.zeros((1, 32, 32, 2), dtype=np.float32)
keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32)
keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32)
keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)
class_probs = np.zeros(1)
class_probs[0] = _logit(0.75)
class_center[0, 16, 16] = class_probs
height_width[0, 16, 16] = [5, 10]
offset[0, 16, 16] = [.25, .5]
keypoint_regression[0, 16, 16] = [
-1., -1.,
-1., 1.,
1., -1.,
1., 1.]
keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score.
class_center = tf.constant(class_center)
height_width = tf.constant(height_width)
offset = tf.constant(offset)
keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)
prediction_dict = {
cnma.OBJECT_CENTER: [class_center],
cnma.BOX_SCALE: [height_width],
cnma.BOX_OFFSET: [offset],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
[keypoint_heatmaps],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
[keypoint_offsets],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
[keypoint_regression],
}
def graph_fn():
detections = model.postprocess(prediction_dict,
tf.constant([[1, 1, 3]]))
return detections
detections = self.execute_cpu(graph_fn, [])
self.assertAllClose(detections['detection_boxes'][0, 0],
np.array([0, 0, 0, 0]))
# The class_center logits are initialized as 0's so it's filled with 0.5s.
# Despite that, we should only find one box.
self.assertAllClose(detections['detection_scores'][0],
[0.5, 0., 0., 0., 0.])
self.assertEqual(np.sum(detections['detection_classes']), 0)
self.assertEqual(detections['num_detections'], [1])
self.assertAllEqual([1, max_detection, num_keypoints, 2],
detections['detection_keypoints'].shape)
self.assertAllEqual([1, max_detection, num_keypoints],
detections['detection_keypoint_scores'].shape)
def test_get_instance_indices(self):
classes = tf.constant([[0, 1, 2, 0], [2, 1, 2, 2]], dtype=tf.int32)
num_detections = tf.constant([1, 3], dtype=tf.int32)
batch_index = 1
class_id = 2
model = build_center_net_meta_arch()
valid_indices = model._get_instance_indices(
classes, num_detections, batch_index, class_id)
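    # For batch index 1 only the first num_detections=3 entries are valid;
    # of those, entries 0 and 2 have class_id 2 (entry 3 also matches but is
    # beyond num_detections), so the expected indices are [0, 2].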
self.assertAllEqual(valid_indices.numpy(), [0, 2])
def test_rescore_instances(self):
feature_extractor = DummyFeatureExtractor(
channel_means=(1.0, 2.0, 3.0),
channel_stds=(10., 20., 30.),
bgr_ordering=False,
num_feature_outputs=2,
stride=4)
image_resizer_fn = functools.partial(
preprocessor.resize_to_range,
min_dimension=128,
max_dimension=128,
        pad_to_max_dimension=True)
kp_params_1 = cnma.KeypointEstimationParams(
task_name='kpt_task_1',
class_id=0,
keypoint_indices=[0, 1, 2],
keypoint_std_dev=[0.00001] * 3,
classification_loss=losses.WeightedSigmoidClassificationLoss(),
localization_loss=losses.L1LocalizationLoss(),
keypoint_candidate_score_threshold=0.1,
rescore_instances=True) # Note rescoring for class_id = 0.
kp_params_2 = cnma.KeypointEstimationParams(
task_name='kpt_task_2',
class_id=1,
keypoint_indices=[3, 4],
keypoint_std_dev=[0.00001] * 2,
classification_loss=losses.WeightedSigmoidClassificationLoss(),
localization_loss=losses.L1LocalizationLoss(),
keypoint_candidate_score_threshold=0.1,
rescore_instances=False)
model = cnma.CenterNetMetaArch(
is_training=True,
add_summaries=False,
num_classes=2,
feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=get_fake_center_params(),
object_detection_params=get_fake_od_params(),
keypoint_params_dict={
'kpt_task_1': kp_params_1,
'kpt_task_2': kp_params_2,
})
def graph_fn():
classes = tf.constant([[1, 0]], dtype=tf.int32)
scores = tf.constant([[0.5, 0.75]], dtype=tf.float32)
keypoint_scores = tf.constant(
[
[[0.1, 0.0, 0.3, 0.4, 0.5],
[0.1, 0.2, 0.3, 0.4, 0.5]],
])
new_scores = model._rescore_instances(classes, scores, keypoint_scores)
return new_scores
new_scores = self.execute_cpu(graph_fn, [])
expected_scores = np.array(
[[0.5, 0.75 * (0.1 + 0.3)/2]]
)
self.assertAllClose(expected_scores, new_scores)
def get_fake_prediction_dict(input_height,
input_width,
stride,
per_keypoint_depth=False):
"""Prepares the fake prediction dictionary."""
output_height = input_height // stride
output_width = input_width // stride
object_center = np.zeros((2, output_height, output_width, _NUM_CLASSES),
dtype=np.float32)
# Box center:
# y: floor((0.54 + 0.56) / 2 * 4) = 2,
# x: floor((0.54 + 0.56) / 2 * 8) = 4
object_center[0, 2, 4, 1] = 1.0
object_center = _logit(object_center)
# Box size:
# height: (0.56 - 0.54) * 4 = 0.08
# width: (0.56 - 0.54) * 8 = 0.16
object_scale = np.zeros((2, output_height, output_width, 2), dtype=np.float32)
object_scale[0, 2, 4] = 0.08, 0.16
# Box center offset coordinate (0.55, 0.55):
# y-offset: 0.55 * 4 - 2 = 0.2
# x-offset: 0.55 * 8 - 4 = 0.4
object_offset = np.zeros((2, output_height, output_width, 2),
dtype=np.float32)
object_offset[0, 2, 4] = 0.2, 0.4
keypoint_heatmap = np.zeros((2, output_height, output_width, _NUM_KEYPOINTS),
dtype=np.float32)
keypoint_heatmap[0, 2, 4, 1] = 1.0
keypoint_heatmap[0, 2, 4, 3] = 1.0
keypoint_heatmap = _logit(keypoint_heatmap)
keypoint_offset = np.zeros((2, output_height, output_width, 2),
dtype=np.float32)
keypoint_offset[0, 2, 4] = 0.2, 0.4
keypoint_depth = np.zeros((2, output_height, output_width,
_NUM_KEYPOINTS if per_keypoint_depth else 1),
dtype=np.float32)
keypoint_depth[0, 2, 4] = 3.0
keypoint_regression = np.zeros(
(2, output_height, output_width, 2 * _NUM_KEYPOINTS), dtype=np.float32)
keypoint_regression[0, 2, 4] = 0.0, 0.0, 0.2, 0.4, 0.0, 0.0, 0.2, 0.4
mask_heatmap = np.zeros((2, output_height, output_width, _NUM_CLASSES),
dtype=np.float32)
mask_heatmap[0, 2, 4, 1] = 1.0
mask_heatmap = _logit(mask_heatmap)
densepose_heatmap = np.zeros((2, output_height, output_width,
_DENSEPOSE_NUM_PARTS), dtype=np.float32)
densepose_heatmap[0, 2, 4, 5] = 1.0
densepose_heatmap = _logit(densepose_heatmap)
densepose_regression = np.zeros((2, output_height, output_width,
2 * _DENSEPOSE_NUM_PARTS), dtype=np.float32)
# The surface coordinate indices for part index 5 are:
# (5 * 2, 5 * 2 + 1), or (10, 11).
densepose_regression[0, 2, 4, 10:12] = 0.4, 0.7
track_reid_embedding = np.zeros((2, output_height, output_width,
_REID_EMBED_SIZE), dtype=np.float32)
track_reid_embedding[0, 2, 4, :] = np.arange(_REID_EMBED_SIZE)
temporal_offsets = np.zeros((2, output_height, output_width, 2),
dtype=np.float32)
temporal_offsets[0, 2, 4, :] = 5
prediction_dict = {
'preprocessed_inputs':
tf.zeros((2, input_height, input_width, 3)),
cnma.OBJECT_CENTER: [
tf.constant(object_center),
tf.constant(object_center)
],
cnma.BOX_SCALE: [tf.constant(object_scale),
tf.constant(object_scale)],
cnma.BOX_OFFSET: [tf.constant(object_offset),
tf.constant(object_offset)],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): [
tf.constant(keypoint_heatmap),
tf.constant(keypoint_heatmap)
],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): [
tf.constant(keypoint_offset),
tf.constant(keypoint_offset)
],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): [
tf.constant(keypoint_regression),
tf.constant(keypoint_regression)
],
cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_DEPTH): [
tf.constant(keypoint_depth),
tf.constant(keypoint_depth)
],
cnma.SEGMENTATION_HEATMAP: [
tf.constant(mask_heatmap),
tf.constant(mask_heatmap)
],
cnma.DENSEPOSE_HEATMAP: [
tf.constant(densepose_heatmap),
tf.constant(densepose_heatmap),
],
cnma.DENSEPOSE_REGRESSION: [
tf.constant(densepose_regression),
tf.constant(densepose_regression),
],
cnma.TRACK_REID: [
tf.constant(track_reid_embedding),
tf.constant(track_reid_embedding),
],
cnma.TEMPORAL_OFFSET: [
tf.constant(temporal_offsets),
tf.constant(temporal_offsets),
],
}
return prediction_dict
def get_fake_groundtruth_dict(input_height,
input_width,
stride,
has_depth=False):
"""Prepares the fake groundtruth dictionary."""
# A small box with center at (0.55, 0.55).
boxes = [
tf.constant([[0.54, 0.54, 0.56, 0.56]]),
tf.constant([[0.0, 0.0, 0.5, 0.5]]),
]
classes = [
tf.one_hot([1], depth=_NUM_CLASSES),
tf.one_hot([0], depth=_NUM_CLASSES),
]
weights = [
tf.constant([1.]),
tf.constant([0.]),
]
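  # NaN keypoint coordinates below mark keypoints that are not labeled; the
  # CenterNet target assigners are expected to ignore them.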
keypoints = [
tf.tile(
tf.expand_dims(
tf.constant([[float('nan'), 0.55,
float('nan'), 0.55, 0.55, 0.0]]),
axis=2),
multiples=[1, 1, 2]),
tf.tile(
tf.expand_dims(
tf.constant([[float('nan'), 0.55,
float('nan'), 0.55, 0.55, 0.0]]),
axis=2),
multiples=[1, 1, 2]),
]
if has_depth:
keypoint_depths = [
tf.constant([[float('nan'), 3.0,
float('nan'), 3.0, 0.55, 0.0]]),
tf.constant([[float('nan'), 0.55,
float('nan'), 0.55, 0.55, 0.0]])
]
keypoint_depth_weights = [
tf.constant([[1.0, 1.0, 1.0, 1.0, 0.0, 0.0]]),
tf.constant([[1.0, 1.0, 1.0, 1.0, 0.0, 0.0]])
]
else:
keypoint_depths = [
tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
]
keypoint_depth_weights = [
tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
]
labeled_classes = [
tf.one_hot([1], depth=_NUM_CLASSES) + tf.one_hot([2], depth=_NUM_CLASSES),
tf.one_hot([0], depth=_NUM_CLASSES) + tf.one_hot([1], depth=_NUM_CLASSES),
]
mask = np.zeros((1, input_height, input_width), dtype=np.float32)
mask[0, 8:8+stride, 16:16+stride] = 1
masks = [
tf.constant(mask),
tf.zeros_like(mask),
]
densepose_num_points = [
tf.constant([1], dtype=tf.int32),
tf.constant([0], dtype=tf.int32),
]
densepose_part_ids = [
tf.constant([[5, 0, 0]], dtype=tf.int32),
tf.constant([[0, 0, 0]], dtype=tf.int32),
]
densepose_surface_coords_np = np.zeros((1, 3, 4), dtype=np.float32)
densepose_surface_coords_np[0, 0, :] = 0.55, 0.55, 0.4, 0.7
densepose_surface_coords = [
tf.constant(densepose_surface_coords_np),
tf.zeros_like(densepose_surface_coords_np)
]
track_ids = [
tf.constant([2], dtype=tf.int32),
tf.constant([1], dtype=tf.int32),
]
temporal_offsets = [
tf.constant([[5.0, 5.0]], dtype=tf.float32),
tf.constant([[2.0, 3.0]], dtype=tf.float32),
]
track_match_flags = [
tf.constant([1.0], dtype=tf.float32),
tf.constant([1.0], dtype=tf.float32),
]
groundtruth_dict = {
fields.BoxListFields.boxes: boxes,
fields.BoxListFields.weights: weights,
fields.BoxListFields.classes: classes,
fields.BoxListFields.keypoints: keypoints,
fields.BoxListFields.keypoint_depths: keypoint_depths,
fields.BoxListFields.keypoint_depth_weights: keypoint_depth_weights,
fields.BoxListFields.masks: masks,
fields.BoxListFields.densepose_num_points: densepose_num_points,
fields.BoxListFields.densepose_part_ids: densepose_part_ids,
fields.BoxListFields.densepose_surface_coords: densepose_surface_coords,
fields.BoxListFields.track_ids: track_ids,
fields.BoxListFields.temporal_offsets: temporal_offsets,
fields.BoxListFields.track_match_flags: track_match_flags,
fields.InputDataFields.groundtruth_labeled_classes: labeled_classes,
}
return groundtruth_dict
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaComputeLossTest(test_case.TestCase, parameterized.TestCase):
"""Test for CenterNet loss compuation related functions."""
def setUp(self):
self.model = build_center_net_meta_arch()
self.classification_loss_fn = self.model._center_params.classification_loss
self.localization_loss_fn = self.model._od_params.localization_loss
self.true_image_shapes = tf.constant([[16, 24, 3], [16, 24, 3]])
self.input_height = 16
self.input_width = 32
self.stride = 4
self.per_pixel_weights = self.get_per_pixel_weights(self.true_image_shapes,
self.input_height,
self.input_width,
self.stride)
self.prediction_dict = get_fake_prediction_dict(self.input_height,
self.input_width,
self.stride)
self.model._groundtruth_lists = get_fake_groundtruth_dict(
self.input_height, self.input_width, self.stride)
super(CenterNetMetaComputeLossTest, self).setUp()
def get_per_pixel_weights(self, true_image_shapes, input_height, input_width,
stride):
output_height, output_width = (input_height // stride,
input_width // stride)
# TODO(vighneshb) Explore whether using floor here is safe.
output_true_image_shapes = tf.ceil(tf.to_float(true_image_shapes) / stride)
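    # With the setUp defaults (true shapes 16x24, stride 4) this gives a 4x6
    # valid region inside the 4x8 output grid, so (assuming
    # get_valid_anchor_weights_in_flattened_image zeroes out-of-image anchors)
    # the last two columns of each row receive zero weight.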
per_pixel_weights = cnma.get_valid_anchor_weights_in_flattened_image(
output_true_image_shapes, output_height, output_width)
per_pixel_weights = tf.expand_dims(per_pixel_weights, 2)
return per_pixel_weights
def test_compute_object_center_loss(self):
def graph_fn():
loss = self.model._compute_object_center_loss(
object_center_predictions=self.prediction_dict[cnma.OBJECT_CENTER],
input_height=self.input_height,
input_width=self.input_width,
per_pixel_weights=self.per_pixel_weights)
return loss
loss = self.execute(graph_fn, [])
# The prediction and groundtruth are curated to produce very low loss.
self.assertGreater(0.01, loss)
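    # Re-run the loss with use_labeled_classes enabled to also exercise the
    # labeled-classes weighting path, restoring the default value afterwards.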
default_value = self.model._center_params.use_labeled_classes
self.model._center_params = (
self.model._center_params._replace(use_labeled_classes=True))
loss = self.model._compute_object_center_loss(
object_center_predictions=self.prediction_dict[cnma.OBJECT_CENTER],
input_height=self.input_height,
input_width=self.input_width,
per_pixel_weights=self.per_pixel_weights)
self.model._center_params = (
self.model._center_params._replace(use_labeled_classes=default_value))
# The prediction and groundtruth are curated to produce very low loss.
self.assertGreater(0.01, loss)
def test_compute_box_scale_and_offset_loss(self):
def graph_fn():
scale_loss, offset_loss = self.model._compute_box_scale_and_offset_loss(
scale_predictions=self.prediction_dict[cnma.BOX_SCALE],
offset_predictions=self.prediction_dict[cnma.BOX_OFFSET],
input_height=self.input_height,
input_width=self.input_width)
return scale_loss, offset_loss
scale_loss, offset_loss = self.execute(graph_fn, [])
# The prediction and groundtruth are curated to produce very low loss.
self.assertGreater(0.01, scale_loss)
self.assertGreater(0.01, offset_loss)
def test_compute_kp_heatmap_loss(self):
def graph_fn():
loss = self.model._compute_kp_heatmap_loss(
input_height=self.input_height,
input_width=self.input_width,
task_name=_TASK_NAME,
heatmap_predictions=self.prediction_dict[cnma.get_keypoint_name(
_TASK_NAME, cnma.KEYPOINT_HEATMAP)],
classification_loss_fn=self.classification_loss_fn,
per_pixel_weights=self.per_pixel_weights)
return loss
loss = self.execute(graph_fn, [])
# The prediction and groundtruth are curated to produce very low loss.
self.assertGreater(0.01, loss)
def test_compute_kp_offset_loss(self):
def graph_fn():
loss = self.model._compute_kp_offset_loss(
input_height=self.input_height,
input_width=self.input_width,
task_name=_TASK_NAME,
offset_predictions=self.prediction_dict[cnma.get_keypoint_name(
_TASK_NAME, cnma.KEYPOINT_OFFSET)],
localization_loss_fn=self.localization_loss_fn)
return loss
loss = self.execute(graph_fn, [])
# The prediction and groundtruth are curated to produce very low loss.
self.assertGreater(0.01, loss)
def test_compute_kp_regression_loss(self):
def graph_fn():
loss = self.model._compute_kp_regression_loss(
input_height=self.input_height,
input_width=self.input_width,
task_name=_TASK_NAME,
regression_predictions=self.prediction_dict[cnma.get_keypoint_name(
_TASK_NAME, cnma.KEYPOINT_REGRESSION,)],
localization_loss_fn=self.localization_loss_fn)
return loss
loss = self.execute(graph_fn, [])
# The prediction and groundtruth are curated to produce very low loss.
self.assertGreater(0.01, loss)
@parameterized.parameters(
{'per_keypoint_depth': False},
{'per_keypoint_depth': True},
)
def test_compute_kp_depth_loss(self, per_keypoint_depth):
prediction_dict = get_fake_prediction_dict(
self.input_height,
self.input_width,
self.stride,
per_keypoint_depth=per_keypoint_depth)
model = build_center_net_meta_arch(
num_classes=1,
per_keypoint_offset=per_keypoint_depth,
predict_depth=True,
per_keypoint_depth=per_keypoint_depth,
peak_radius=1 if per_keypoint_depth else 0)
model._groundtruth_lists = get_fake_groundtruth_dict(
self.input_height, self.input_width, self.stride, has_depth=True)
def graph_fn():
loss = model._compute_kp_depth_loss(
input_height=self.input_height,
input_width=self.input_width,
task_name=_TASK_NAME,
depth_predictions=prediction_dict[cnma.get_keypoint_name(
_TASK_NAME, cnma.KEYPOINT_DEPTH)],
localization_loss_fn=self.localization_loss_fn)
return loss
loss = self.execute(graph_fn, [])
if per_keypoint_depth:
# The loss is computed on a disk with radius 1 but only the center pixel
# has the accurate prediction. The final loss is (4 * |3-0|) / 5 = 2.4
self.assertAlmostEqual(2.4, loss, delta=1e-4)
else:
# The prediction and groundtruth are curated to produce very low loss.
self.assertGreater(0.01, loss)
def test_compute_track_embedding_loss(self):
default_fc = self.model.track_reid_classification_net
# Initialize the kernel to extreme values so that the classification score
# is close to (0, 0, 1) after the softmax layer.
kernel_initializer = tf.constant_initializer(
[[1, 1, 0], [-1000000, -1000000, 1000000]])
self.model.track_reid_classification_net = tf.keras.layers.Dense(
_NUM_TRACK_IDS,
kernel_initializer=kernel_initializer,
input_shape=(_REID_EMBED_SIZE,))
loss = self.model._compute_track_embedding_loss(
input_height=self.input_height,
input_width=self.input_width,
object_reid_predictions=self.prediction_dict[cnma.TRACK_REID])
self.model.track_reid_classification_net = default_fc
# The prediction and groundtruth are curated to produce very low loss.
self.assertGreater(0.01, loss)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArchRestoreTest(test_case.TestCase):
def test_restore_map_resnet(self):
"""Test restore map for a resnet backbone."""
model = build_center_net_meta_arch(build_resnet=True)
restore_from_objects_map = model.restore_from_objects('classification')
self.assertIsInstance(restore_from_objects_map['feature_extractor'],
tf.keras.Model)
  def test_restore_map_detection(self):
"""Test that detection checkpoints can be restored."""
model = build_center_net_meta_arch(build_resnet=True)
restore_from_objects_map = model.restore_from_objects('detection')
self.assertIsInstance(restore_from_objects_map['model']._feature_extractor,
tf.keras.Model)
class DummyFeatureExtractor(cnma.CenterNetFeatureExtractor):
def __init__(self,
channel_means,
channel_stds,
bgr_ordering,
num_feature_outputs,
stride):
self._num_feature_outputs = num_feature_outputs
self._stride = stride
super(DummyFeatureExtractor, self).__init__(
channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def predict(self):
pass
def loss(self):
pass
def postprocess(self):
pass
def call(self, inputs):
batch_size, input_height, input_width, _ = inputs.shape
fake_output = tf.ones([
batch_size, input_height // self._stride, input_width // self._stride,
64
], dtype=tf.float32)
return [fake_output] * self._num_feature_outputs
@property
def out_stride(self):
return self._stride
@property
def num_feature_outputs(self):
return self._num_feature_outputs
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetFeatureExtractorTest(test_case.TestCase):
"""Test the base feature extractor class."""
def test_preprocess(self):
feature_extractor = DummyFeatureExtractor(
channel_means=(1.0, 2.0, 3.0),
channel_stds=(10., 20., 30.), bgr_ordering=False,
num_feature_outputs=2, stride=4)
img = np.zeros((2, 32, 32, 3))
img[:, :, :] = 11, 22, 33
def graph_fn():
output = feature_extractor.preprocess(img)
return output
output = self.execute(graph_fn, [])
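    # Assuming preprocess normalizes as (x - mean) / std, every channel maps
    # to 1.0 ((11-1)/10, (22-2)/20, (33-3)/30), so the sum equals the number
    # of elements.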
self.assertAlmostEqual(output.sum(), 2 * 32 * 32 * 3)
def test_preprocess_reverse(self):
feature_extractor = DummyFeatureExtractor(
channel_means=(1.0, 2.0, 3.0),
channel_stds=(10., 20., 30.), bgr_ordering=False,
num_feature_outputs=2, stride=4)
img = np.zeros((2, 32, 32, 3))
img[:, :, :] = 11, 22, 33
def graph_fn():
output = feature_extractor.preprocess_reverse(
feature_extractor.preprocess(img))
return output
output = self.execute(graph_fn, [])
self.assertAllClose(img, output)
def test_bgr_ordering(self):
feature_extractor = DummyFeatureExtractor(
channel_means=(0.0, 0.0, 0.0),
channel_stds=(1., 1., 1.), bgr_ordering=True,
num_feature_outputs=2, stride=4)
img = np.zeros((2, 32, 32, 3), dtype=np.float32)
img[:, :, :] = 1, 2, 3
def graph_fn():
output = feature_extractor.preprocess(img)
return output
output = self.execute(graph_fn, [])
self.assertAllClose(output[..., 2], 1 * np.ones((2, 32, 32)))
self.assertAllClose(output[..., 1], 2 * np.ones((2, 32, 32)))
self.assertAllClose(output[..., 0], 3 * np.ones((2, 32, 32)))
def test_default_ordering(self):
feature_extractor = DummyFeatureExtractor(
channel_means=(0.0, 0.0, 0.0),
channel_stds=(1., 1., 1.), bgr_ordering=False,
num_feature_outputs=2, stride=4)
img = np.zeros((2, 32, 32, 3), dtype=np.float32)
img[:, :, :] = 1, 2, 3
def graph_fn():
output = feature_extractor.preprocess(img)
return output
output = self.execute(graph_fn, [])
self.assertAllClose(output[..., 0], 1 * np.ones((2, 32, 32)))
self.assertAllClose(output[..., 1], 2 * np.ones((2, 32, 32)))
self.assertAllClose(output[..., 2], 3 * np.ones((2, 32, 32)))
class Dummy1dFeatureExtractor(cnma.CenterNetFeatureExtractor):
"""Returns a static tensor."""
def __init__(self, tensor, out_stride=1, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Intializes the feature extractor.
Args:
tensor: The tensor to return as the processed feature.
out_stride: The out_stride to return if asked.
channel_means: Ignored, but provided for API compatability.
channel_stds: Ignored, but provided for API compatability.
bgr_ordering: Ignored, but provided for API compatability.
"""
super().__init__(
channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
self._tensor = tensor
self._out_stride = out_stride
def call(self, inputs):
return [self._tensor]
@property
def out_stride(self):
"""The stride in the output image of the network."""
return self._out_stride
@property
def num_feature_outputs(self):
"""Ther number of feature outputs returned by the feature extractor."""
return 1
@property
def supported_sub_model_types(self):
return ['detection']
def get_sub_model(self, sub_model_type):
if sub_model_type == 'detection':
return self._network
else:
      raise ValueError(
          'Sub model type "{}" not supported.'.format(sub_model_type))
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArch1dTest(test_case.TestCase, parameterized.TestCase):
@parameterized.parameters([1, 2])
def test_outputs_with_correct_shape(self, stride):
    # The 1D case reuses code from the 2D cases. These tests only check that
    # the output shapes are correct, and rely on other tests for correctness.
batch_size = 2
height = 1
width = 32
channels = 16
unstrided_inputs = np.random.randn(
batch_size, height, width, channels)
fixed_output_features = np.random.randn(
batch_size, height, width // stride, channels)
max_boxes = 10
num_classes = 3
feature_extractor = Dummy1dFeatureExtractor(fixed_output_features, stride)
arch = cnma.CenterNetMetaArch(
is_training=True,
add_summaries=True,
num_classes=num_classes,
feature_extractor=feature_extractor,
image_resizer_fn=None,
object_center_params=cnma.ObjectCenterParams(
classification_loss=losses.PenaltyReducedLogisticFocalLoss(),
object_center_loss_weight=1.0,
max_box_predictions=max_boxes,
),
object_detection_params=cnma.ObjectDetectionParams(
localization_loss=losses.L1LocalizationLoss(),
scale_loss_weight=1.0,
offset_loss_weight=1.0,
),
keypoint_params_dict=None,
mask_params=None,
densepose_params=None,
track_params=None,
temporal_offset_params=None,
use_depthwise=False,
compute_heatmap_sparse=False,
non_max_suppression_fn=None,
unit_height_conv=True)
arch.provide_groundtruth(
groundtruth_boxes_list=[
tf.constant([[0, 0.5, 1.0, 0.75],
[0, 0.1, 1.0, 0.25]], tf.float32),
tf.constant([[0, 0, 1.0, 1.0],
[0, 0, 0.0, 0.0]], tf.float32)
],
groundtruth_classes_list=[
tf.constant([[0, 0, 1],
[0, 1, 0]], tf.float32),
tf.constant([[1, 0, 0],
[0, 0, 0]], tf.float32)
],
groundtruth_weights_list=[
tf.constant([1.0, 1.0]),
tf.constant([1.0, 0.0])]
)
predictions = arch.predict(None, None) # input is hardcoded above.
predictions['preprocessed_inputs'] = tf.constant(unstrided_inputs)
true_shapes = tf.constant([[1, 32, 16], [1, 24, 16]], tf.int32)
postprocess_output = arch.postprocess(predictions, true_shapes)
losses_output = arch.loss(predictions, true_shapes)
self.assertIn('extracted_features', predictions)
self.assertIn('%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER),
losses_output)
self.assertEqual((), losses_output['%s/%s' % (
cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER)].shape)
self.assertIn('%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE),
losses_output)
self.assertEqual((), losses_output['%s/%s' % (
cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE)].shape)
self.assertIn('%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET),
losses_output)
self.assertEqual((), losses_output['%s/%s' % (
cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET)].shape)
self.assertIn('detection_scores', postprocess_output)
self.assertEqual(postprocess_output['detection_scores'].shape,
(batch_size, max_boxes))
self.assertIn('detection_multiclass_scores', postprocess_output)
self.assertEqual(postprocess_output['detection_multiclass_scores'].shape,
(batch_size, max_boxes, num_classes))
self.assertIn('detection_classes', postprocess_output)
self.assertEqual(postprocess_output['detection_classes'].shape,
(batch_size, max_boxes))
self.assertIn('num_detections', postprocess_output)
self.assertEqual(postprocess_output['num_detections'].shape,
(batch_size,))
self.assertIn('detection_boxes', postprocess_output)
self.assertEqual(postprocess_output['detection_boxes'].shape,
(batch_size, max_boxes, 4))
self.assertIn('detection_boxes_strided', postprocess_output)
self.assertEqual(postprocess_output['detection_boxes_strided'].shape,
(batch_size, max_boxes, 4))
self.assertIn(cnma.OBJECT_CENTER, predictions)
self.assertEqual(predictions[cnma.OBJECT_CENTER][0].shape,
(batch_size, height, width // stride, num_classes))
self.assertIn(cnma.BOX_SCALE, predictions)
self.assertEqual(predictions[cnma.BOX_SCALE][0].shape,
(batch_size, height, width // stride, 2))
self.assertIn(cnma.BOX_OFFSET, predictions)
self.assertEqual(predictions[cnma.BOX_OFFSET][0].shape,
(batch_size, height, width // stride, 2))
self.assertIn('preprocessed_inputs', predictions)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 141,960 | 38.42266 | 80 | py |
models | models-master/research/object_detection/meta_architectures/ssd_meta_arch_test_lib.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for SSD models meta architecture tests."""
import functools
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import post_processing_builder
from object_detection.core import anchor_generator
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import box_list
from object_detection.core import losses
from object_detection.core import post_processing
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import target_assigner
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.protos import calibration_pb2
from object_detection.protos import model_pb2
from object_detection.utils import ops
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
try:
import tf_slim as slim
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
keras = tf.keras.layers
class FakeSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""Fake ssd feature extracture for ssd meta arch tests."""
def __init__(self):
super(FakeSSDFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=0,
min_depth=0,
pad_to_multiple=1,
conv_hyperparams_fn=None)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def extract_features(self, preprocessed_inputs):
with tf.variable_scope('mock_model'):
features = slim.conv2d(
inputs=preprocessed_inputs,
num_outputs=32,
kernel_size=1,
scope='layer1')
return [features]
class FakeSSDKerasFeatureExtractor(ssd_meta_arch.SSDKerasFeatureExtractor):
"""Fake keras based ssd feature extracture for ssd meta arch tests."""
def __init__(self):
with tf.name_scope('mock_model'):
super(FakeSSDKerasFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=0,
min_depth=0,
pad_to_multiple=1,
conv_hyperparams=None,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
)
self._conv = keras.Conv2D(filters=32, kernel_size=1, name='layer1')
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def _extract_features(self, preprocessed_inputs, **kwargs):
with tf.name_scope('mock_model'):
return [self._conv(preprocessed_inputs)]
class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):
"""A simple 2x2 anchor grid on the unit square used for test only."""
def name_scope(self):
return 'MockAnchorGenerator'
def num_anchors_per_location(self):
return [1]
def _generate(self, feature_map_shape_list, im_height, im_width):
return [
box_list.BoxList(
tf.constant(
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[1., 1., 1.5, 1.5] # Anchor that is outside clip_window.
],
tf.float32))
]
def num_anchors(self):
return 4
class SSDMetaArchTestBase(test_case.TestCase):
"""Base class to test SSD based meta architectures."""
def _create_model(
self,
model_fn=ssd_meta_arch.SSDMetaArch,
apply_hard_mining=True,
normalize_loc_loss_by_codesize=False,
add_background_class=True,
random_example_sampling=False,
expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE,
min_num_negative_samples=1,
desired_negative_sampling_ratio=3,
predict_mask=False,
use_static_shapes=False,
nms_max_size_per_class=5,
calibration_mapping_value=None,
return_raw_detections_during_predict=False):
is_training = False
num_classes = 1
mock_anchor_generator = MockAnchorGenerator2x2()
use_keras = tf_version.is_tf2()
if use_keras:
mock_box_predictor = test_utils.MockKerasBoxPredictor(
is_training, num_classes, add_background_class=add_background_class)
else:
mock_box_predictor = test_utils.MockBoxPredictor(
is_training, num_classes, add_background_class=add_background_class)
mock_box_coder = test_utils.MockBoxCoder()
if use_keras:
fake_feature_extractor = FakeSSDKerasFeatureExtractor()
else:
fake_feature_extractor = FakeSSDFeatureExtractor()
mock_matcher = test_utils.MockMatcher()
region_similarity_calculator = sim_calc.IouSimilarity()
encode_background_as_zeros = False
def image_resizer_fn(image):
return [tf.identity(image), tf.shape(image)]
classification_loss = losses.WeightedSigmoidClassificationLoss()
localization_loss = losses.WeightedSmoothL1LocalizationLoss()
non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=-20.0,
iou_thresh=1.0,
max_size_per_class=nms_max_size_per_class,
max_total_size=nms_max_size_per_class,
use_static_shapes=use_static_shapes)
score_conversion_fn = tf.identity
calibration_config = calibration_pb2.CalibrationConfig()
if calibration_mapping_value:
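      # The two x_y_pairs below define a constant mapping, so every score is
      # calibrated to calibration_mapping_value, which keeps the expected
      # detection scores in the tests easy to compute.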
calibration_text_proto = """
function_approximation {
x_y_pairs {
x_y_pair {
x: 0.0
y: %f
}
x_y_pair {
x: 1.0
y: %f
}}}""" % (calibration_mapping_value, calibration_mapping_value)
text_format.Merge(calibration_text_proto, calibration_config)
score_conversion_fn = (
post_processing_builder._build_calibrated_score_converter( # pylint: disable=protected-access
tf.identity, calibration_config))
classification_loss_weight = 1.0
localization_loss_weight = 1.0
negative_class_weight = 1.0
normalize_loss_by_num_matches = False
hard_example_miner = None
if apply_hard_mining:
# This hard example miner is expected to be a no-op.
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=None, iou_threshold=1.0)
random_example_sampler = None
if random_example_sampling:
random_example_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=0.5)
target_assigner_instance = target_assigner.TargetAssigner(
region_similarity_calculator,
mock_matcher,
mock_box_coder,
negative_class_weight=negative_class_weight)
model_config = model_pb2.DetectionModel()
if expected_loss_weights == model_config.ssd.loss.NONE:
expected_loss_weights_fn = None
else:
raise ValueError('Not a valid value for expected_loss_weights.')
code_size = 4
kwargs = {}
if predict_mask:
kwargs.update({
'mask_prediction_fn': test_utils.MockMaskHead(num_classes=1).predict,
})
model = model_fn(
is_training=is_training,
anchor_generator=mock_anchor_generator,
box_predictor=mock_box_predictor,
box_coder=mock_box_coder,
feature_extractor=fake_feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=score_conversion_fn,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_loss_weight,
localization_loss_weight=localization_loss_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=hard_example_miner,
target_assigner_instance=target_assigner_instance,
add_summaries=False,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
add_background_class=add_background_class,
random_example_sampler=random_example_sampler,
expected_loss_weights_fn=expected_loss_weights_fn,
return_raw_detections_during_predict=(
return_raw_detections_during_predict),
**kwargs)
return model, num_classes, mock_anchor_generator.num_anchors(), code_size
def _get_value_for_matching_key(self, dictionary, suffix):
for key in dictionary.keys():
if key.endswith(suffix):
return dictionary[key]
raise ValueError('key not found {}'.format(suffix))
| 9,337 | 34.915385 | 104 | py |
models | models-master/research/object_detection/meta_architectures/ssd_meta_arch.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Meta-architecture definition.
General tensorflow implementation of convolutional Multibox/SSD detection
models.
"""
import abc
from absl import logging
import tensorflow.compat.v1 as tf
from tensorflow.python.util.deprecation import deprecated_args
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import matcher
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
from object_detection.utils import visualization_utils
# pylint: disable=g-import-not-at-top
try:
import tf_slim as slim
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
class SSDFeatureExtractor(object):
"""SSD Slim Feature Extractor definition."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
num_layers: Number of SSD layers.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
self._is_training = is_training
self._depth_multiplier = depth_multiplier
self._min_depth = min_depth
self._pad_to_multiple = pad_to_multiple
self._conv_hyperparams_fn = conv_hyperparams_fn
self._reuse_weights = reuse_weights
self._use_explicit_padding = use_explicit_padding
self._use_depthwise = use_depthwise
self._num_layers = num_layers
self._override_base_feature_extractor_hyperparams = (
override_base_feature_extractor_hyperparams)
@property
def is_keras_model(self):
return False
@abc.abstractmethod
def preprocess(self, resized_inputs):
"""Preprocesses images for feature extraction (minus image resizing).
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
pass
@abc.abstractmethod
def extract_features(self, preprocessed_inputs):
"""Extracts features from preprocessed inputs.
This function is responsible for extracting feature maps from preprocessed
images.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
raise NotImplementedError
def restore_from_classification_checkpoint_fn(self, feature_extractor_scope):
"""Returns a map of variables to load from a foreign checkpoint.
Args:
feature_extractor_scope: A scope name for the feature extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
variables_to_restore = {}
for variable in variables_helper.get_global_variables_safely():
var_name = variable.op.name
if var_name.startswith(feature_extractor_scope + '/'):
var_name = var_name.replace(feature_extractor_scope + '/', '')
variables_to_restore[var_name] = variable
return variables_to_restore
class SSDKerasFeatureExtractor(tf.keras.Model):
"""SSD Feature Extractor definition."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False,
name=None):
"""Constructor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
num_layers: Number of SSD layers.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_config`.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDKerasFeatureExtractor, self).__init__(name=name)
self._is_training = is_training
self._depth_multiplier = depth_multiplier
self._min_depth = min_depth
self._pad_to_multiple = pad_to_multiple
self._conv_hyperparams = conv_hyperparams
self._freeze_batchnorm = freeze_batchnorm
self._inplace_batchnorm_update = inplace_batchnorm_update
self._use_explicit_padding = use_explicit_padding
self._use_depthwise = use_depthwise
self._num_layers = num_layers
self._override_base_feature_extractor_hyperparams = (
override_base_feature_extractor_hyperparams)
@property
def is_keras_model(self):
return True
@abc.abstractmethod
def preprocess(self, resized_inputs):
"""Preprocesses images for feature extraction (minus image resizing).
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
raise NotImplementedError
@abc.abstractmethod
def _extract_features(self, preprocessed_inputs):
"""Extracts features from preprocessed inputs.
This function is responsible for extracting feature maps from preprocessed
images.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
raise NotImplementedError
# This overrides the keras.Model `call` method with the _extract_features
# method.
def call(self, inputs, **kwargs):
return self._extract_features(inputs)
class SSDMetaArch(model.DetectionModel):
"""SSD Meta-architecture definition."""
@deprecated_args(None,
'NMS is always placed on TPU; do not use nms_on_host '
'as it has no effect.', 'nms_on_host')
def __init__(self,
is_training,
anchor_generator,
box_predictor,
box_coder,
feature_extractor,
encode_background_as_zeros,
image_resizer_fn,
non_max_suppression_fn,
score_conversion_fn,
classification_loss,
localization_loss,
classification_loss_weight,
localization_loss_weight,
normalize_loss_by_num_matches,
hard_example_miner,
target_assigner_instance,
add_summaries=True,
normalize_loc_loss_by_codesize=False,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
add_background_class=True,
explicit_background_class=False,
random_example_sampler=None,
expected_loss_weights_fn=None,
use_confidences_as_targets=False,
implicit_example_weight=0.5,
equalization_loss_config=None,
return_raw_detections_during_predict=False,
nms_on_host=True):
"""SSDMetaArch Constructor.
TODO(rathodv,jonathanhuang): group NMS parameters + score converter into
a class and loss parameters into a class and write config protos for
postprocessing and losses.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
anchor_generator: an anchor_generator.AnchorGenerator object.
box_predictor: a box_predictor.BoxPredictor object.
box_coder: a box_coder.BoxCoder object.
feature_extractor: a SSDFeatureExtractor object.
encode_background_as_zeros: boolean determining whether background
targets are to be encoded as an all zeros vector or a one-hot
vector (where background is the 0th class).
image_resizer_fn: a callable for image resizing. This callable always
takes a rank-3 image tensor (corresponding to a single image) and
returns a rank-3 image tensor, possibly with new spatial dimensions and
a 1-D tensor of shape [3] indicating shape of true image within
the resized image tensor as the resized image tensor could be padded.
See builders/image_resizer_builder.py.
non_max_suppression_fn: batch_multiclass_non_max_suppression
callable that takes `boxes`, `scores` and optional `clip_window`
inputs (with all other inputs already set) and returns a dictionary
        holding tensors with keys: `detection_boxes`, `detection_scores`,
`detection_classes` and `num_detections`. See `post_processing.
batch_multiclass_non_max_suppression` for the type and shape of these
tensors.
score_conversion_fn: callable elementwise nonlinearity (that takes tensors
as inputs and returns tensors). This is usually used to convert logits
to probabilities.
classification_loss: an object_detection.core.losses.Loss object.
localization_loss: a object_detection.core.losses.Loss object.
classification_loss_weight: float
localization_loss_weight: float
normalize_loss_by_num_matches: boolean
hard_example_miner: a losses.HardExampleMiner object (can be None)
target_assigner_instance: target_assigner.TargetAssigner instance to use.
add_summaries: boolean (default: True) controlling whether summary ops
should be added to tensorflow graph.
normalize_loc_loss_by_codesize: whether to normalize localization loss
by code size of the box encoder.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
add_background_class: Whether to add an implicit background class to
one-hot encodings of groundtruth labels. Set to false if training a
single class model or using groundtruth labels with an explicit
background class.
explicit_background_class: Set to true if using groundtruth labels with an
explicit background class, as in multiclass scores.
random_example_sampler: a BalancedPositiveNegativeSampler object that can
perform random example sampling when computing loss. If None, random
sampling process is skipped. Note that random example sampler and hard
example miner can both be applied to the model. In that case, random
sampler will take effect first and hard example miner can only process
the random sampled examples.
expected_loss_weights_fn: If not None, use to calculate
loss by background/foreground weighting. Should take batch_cls_targets
as inputs and return foreground_weights, background_weights. See
expected_classification_loss_by_expected_sampling and
expected_classification_loss_by_reweighting_unmatched_anchors in
third_party/tensorflow_models/object_detection/utils/ops.py as examples.
      use_confidences_as_targets: Whether to use groundtruth_confidences field
to assign the targets.
implicit_example_weight: a float number that specifies the weight used
for the implicit negative examples.
equalization_loss_config: a namedtuple that specifies configs for
computing equalization loss.
return_raw_detections_during_predict: Whether to return raw detection
boxes in the predict() method. These are decoded boxes that have not
been through postprocessing (i.e. NMS). Default False.
nms_on_host: boolean (default: True) controlling whether NMS should be
carried out on the host (outside of TPU).
"""
super(SSDMetaArch, self).__init__(num_classes=box_predictor.num_classes)
self._is_training = is_training
self._freeze_batchnorm = freeze_batchnorm
self._inplace_batchnorm_update = inplace_batchnorm_update
self._anchor_generator = anchor_generator
self._box_predictor = box_predictor
self._box_coder = box_coder
self._feature_extractor = feature_extractor
self._add_background_class = add_background_class
self._explicit_background_class = explicit_background_class
if add_background_class and explicit_background_class:
raise ValueError("Cannot have both 'add_background_class' and"
" 'explicit_background_class' true.")
# Needed for fine-tuning from classification checkpoints whose
# variables do not have the feature extractor scope.
if self._feature_extractor.is_keras_model:
# Keras feature extractors will have a name they implicitly use to scope.
# So, all contained variables are prefixed by this name.
# To load from classification checkpoints, need to filter out this name.
self._extract_features_scope = feature_extractor.name
else:
# Slim feature extractors get an explicit naming scope
self._extract_features_scope = 'FeatureExtractor'
if encode_background_as_zeros:
background_class = [0]
else:
background_class = [1]
if self._add_background_class:
num_foreground_classes = self.num_classes
else:
num_foreground_classes = self.num_classes - 1
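    # For example, with 3 classes, add_background_class=True and
    # encode_background_as_zeros=False, the unmatched label is [1, 0, 0, 0].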
self._unmatched_class_label = tf.constant(
background_class + num_foreground_classes * [0], tf.float32)
self._target_assigner = target_assigner_instance
self._classification_loss = classification_loss
self._localization_loss = localization_loss
self._classification_loss_weight = classification_loss_weight
self._localization_loss_weight = localization_loss_weight
self._normalize_loss_by_num_matches = normalize_loss_by_num_matches
self._normalize_loc_loss_by_codesize = normalize_loc_loss_by_codesize
self._hard_example_miner = hard_example_miner
self._random_example_sampler = random_example_sampler
self._parallel_iterations = 16
self._image_resizer_fn = image_resizer_fn
self._non_max_suppression_fn = non_max_suppression_fn
self._score_conversion_fn = score_conversion_fn
self._anchors = None
self._add_summaries = add_summaries
self._batched_prediction_tensor_names = []
self._expected_loss_weights_fn = expected_loss_weights_fn
self._use_confidences_as_targets = use_confidences_as_targets
self._implicit_example_weight = implicit_example_weight
self._equalization_loss_config = equalization_loss_config
self._return_raw_detections_during_predict = (
return_raw_detections_during_predict)
@property
def feature_extractor(self):
return self._feature_extractor
@property
def anchors(self):
if not self._anchors:
raise RuntimeError('anchors have not been constructed yet!')
if not isinstance(self._anchors, box_list.BoxList):
raise RuntimeError('anchors should be a BoxList object, but is not.')
return self._anchors
@property
def batched_prediction_tensor_names(self):
if not self._batched_prediction_tensor_names:
raise RuntimeError('Must call predict() method to get batched prediction '
'tensor names.')
return self._batched_prediction_tensor_names
def preprocess(self, inputs):
"""Feature-extractor specific preprocessing.
SSD meta architecture uses a default clip_window of [0, 0, 1, 1] during
post-processing. On calling `preprocess` method, clip_window gets updated
based on `true_image_shapes` returned by `image_resizer_fn`.
Args:
inputs: a [batch, height_in, width_in, channels] float tensor representing
a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float
tensor representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Raises:
ValueError: if inputs tensor does not have type tf.float32
"""
with tf.name_scope('Preprocessor'):
normalized_inputs = self._feature_extractor.preprocess(inputs)
return shape_utils.resize_images_and_return_shapes(
normalized_inputs, self._image_resizer_fn)
def _compute_clip_window(self, preprocessed_images, true_image_shapes):
"""Computes clip window to use during post_processing.
Computes a new clip window to use during post-processing based on
`resized_image_shapes` and `true_image_shapes` only if `preprocess` method
has been called. Otherwise returns a default clip window of [0, 0, 1, 1].
Args:
preprocessed_images: the [batch, height, width, channels] image
tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros. Or None if the clip window should cover the full image.
Returns:
a 2-D float32 tensor of the form [batch_size, 4] containing the clip
window for each image in the batch in normalized coordinates (relative to
the resized dimensions) where each clip window is of the form [ymin, xmin,
ymax, xmax] or a default clip window of [0, 0, 1, 1].
"""
if true_image_shapes is None:
return tf.constant([0, 0, 1, 1], dtype=tf.float32)
resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_images)
true_heights, true_widths, _ = tf.unstack(
tf.cast(true_image_shapes, dtype=tf.float32), axis=1)
padded_height = tf.cast(resized_inputs_shape[1], dtype=tf.float32)
padded_width = tf.cast(resized_inputs_shape[2], dtype=tf.float32)
return tf.stack(
[
tf.zeros_like(true_heights),
tf.zeros_like(true_widths), true_heights / padded_height,
true_widths / padded_width
],
axis=1)
def predict(self, preprocessed_inputs, true_image_shapes):
"""Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the forward
    pass of the network to yield unpostprocessed predictions.
A side effect of calling the predict method is that self._anchors is
populated with a box_list.BoxList of anchors. These anchors must be
constructed before the postprocess or loss functions can be called.
Args:
preprocessed_inputs: a [batch, height, width, channels] image tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) preprocessed_inputs: the [batch, height, width, channels] image
tensor.
2) box_encodings: 4-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
3) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions (at class index 0).
4) feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i].
5) anchors: 2-D float tensor of shape [num_anchors, 4] containing
the generated anchors in normalized coordinates.
6) final_anchors: 3-D float tensor of shape [batch_size, num_anchors, 4]
containing the generated anchors in normalized coordinates.
If self._return_raw_detections_during_predict is True, the dictionary
will also contain:
7) raw_detection_boxes: a 4-D float32 tensor with shape
[batch_size, self.max_num_proposals, 4] in normalized coordinates.
8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape
[batch_size, self.max_num_proposals].
"""
if self._inplace_batchnorm_update:
batchnorm_updates_collections = None
else:
batchnorm_updates_collections = tf.GraphKeys.UPDATE_OPS
if self._feature_extractor.is_keras_model:
feature_maps = self._feature_extractor(preprocessed_inputs)
else:
with slim.arg_scope([slim.batch_norm],
is_training=(self._is_training and
not self._freeze_batchnorm),
updates_collections=batchnorm_updates_collections):
with tf.variable_scope(None, self._extract_features_scope,
[preprocessed_inputs]):
feature_maps = self._feature_extractor.extract_features(
preprocessed_inputs)
feature_map_spatial_dims = self._get_feature_map_spatial_dims(
feature_maps)
logging.info('feature_map_spatial_dims: %s', feature_map_spatial_dims)
image_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_inputs)
boxlist_list = self._anchor_generator.generate(
feature_map_spatial_dims,
im_height=image_shape[1],
im_width=image_shape[2])
self._anchors = box_list_ops.concatenate(boxlist_list)
if self._box_predictor.is_keras_model:
predictor_results_dict = self._box_predictor(feature_maps)
else:
with slim.arg_scope([slim.batch_norm],
is_training=(self._is_training and
not self._freeze_batchnorm),
updates_collections=batchnorm_updates_collections):
predictor_results_dict = self._box_predictor.predict(
feature_maps, self._anchor_generator.num_anchors_per_location())
predictions_dict = {
'preprocessed_inputs':
preprocessed_inputs,
'feature_maps':
feature_maps,
'anchors':
self._anchors.get(),
'final_anchors':
tf.tile(
tf.expand_dims(self._anchors.get(), 0), [image_shape[0], 1, 1])
}
for prediction_key, prediction_list in iter(predictor_results_dict.items()):
prediction = tf.concat(prediction_list, axis=1)
if (prediction_key == 'box_encodings' and prediction.shape.ndims == 4 and
prediction.shape[2] == 1):
prediction = tf.squeeze(prediction, axis=2)
predictions_dict[prediction_key] = prediction
if self._return_raw_detections_during_predict:
predictions_dict.update(self._raw_detections_and_feature_map_inds(
predictions_dict['box_encodings'], boxlist_list))
self._batched_prediction_tensor_names = [x for x in predictions_dict
if x != 'anchors']
return predictions_dict
def _raw_detections_and_feature_map_inds(self, box_encodings, boxlist_list):
anchors = self._anchors.get()
raw_detection_boxes, _ = self._batch_decode(box_encodings, anchors)
batch_size, _, _ = shape_utils.combined_static_and_dynamic_shape(
raw_detection_boxes)
feature_map_indices = (
self._anchor_generator.anchor_index_to_feature_map_index(boxlist_list))
feature_map_indices_batched = tf.tile(
tf.expand_dims(feature_map_indices, 0),
multiples=[batch_size, 1])
return {
fields.PredictionFields.raw_detection_boxes: raw_detection_boxes,
fields.PredictionFields.raw_detection_feature_map_indices:
feature_map_indices_batched
}
def _get_feature_map_spatial_dims(self, feature_maps):
"""Return list of spatial dimensions for each feature map in a list.
Args:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i].
Returns:
a list of pairs (height, width) for each feature map in feature_maps
"""
feature_map_shapes = [
shape_utils.combined_static_and_dynamic_shape(
feature_map) for feature_map in feature_maps
]
return [(shape[1], shape[2]) for shape in feature_map_shapes]
def postprocess(self, prediction_dict, true_image_shapes):
"""Converts prediction tensors to final detections.
    This function converts raw prediction tensors to final detection results
    by slicing off the background class, decoding box predictions and applying
    non-max suppression and clipping to the image window.
See base class for output format conventions. Note also that by default,
scores are to be interpreted as logits, but if a score_conversion_fn is
used, then scores are remapped (and may thus have a different
interpretation).
Args:
prediction_dict: a dictionary holding prediction tensors with
1) preprocessed_inputs: a [batch, height, width, channels] image
tensor.
2) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
3) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
4) mask_predictions: (optional) a 5-D float tensor of shape
[batch_size, num_anchors, q, mask_height, mask_width]. `q` can be
either number of classes or 1 depending on whether a separate mask is
predicted per class.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros. Or None, if the clip window should cover the full image.
Returns:
detections: a dictionary containing the following fields
detection_boxes: [batch, max_detections, 4] tensor with post-processed
detection boxes.
detection_scores: [batch, max_detections] tensor with scalar scores for
post-processed detection boxes.
detection_multiclass_scores: [batch, max_detections,
num_classes_with_background] tensor with class score distribution for
post-processed detection boxes including background class if any.
detection_classes: [batch, max_detections] tensor with classes for
post-processed detection classes.
detection_keypoints: [batch, max_detections, num_keypoints, 2] (if
encoded in the prediction_dict 'box_encodings')
detection_masks: [batch_size, max_detections, mask_height, mask_width]
(optional)
num_detections: [batch]
raw_detection_boxes: [batch, total_detections, 4] tensor with decoded
detection boxes before Non-Max Suppression.
      raw_detection_scores: [batch, total_detections,
num_classes_with_background] tensor of multi-class scores for raw
detection boxes.
Raises:
ValueError: if prediction_dict does not contain `box_encodings` or
`class_predictions_with_background` fields.
"""
if ('box_encodings' not in prediction_dict or
'class_predictions_with_background' not in prediction_dict):
raise ValueError('prediction_dict does not contain expected entries.')
if 'anchors' not in prediction_dict:
prediction_dict['anchors'] = self.anchors.get()
with tf.name_scope('Postprocessor'):
preprocessed_images = prediction_dict['preprocessed_inputs']
box_encodings = prediction_dict['box_encodings']
box_encodings = tf.identity(box_encodings, 'raw_box_encodings')
class_predictions_with_background = (
prediction_dict['class_predictions_with_background'])
detection_boxes, detection_keypoints = self._batch_decode(
box_encodings, prediction_dict['anchors'])
detection_boxes = tf.identity(detection_boxes, 'raw_box_locations')
detection_boxes = tf.expand_dims(detection_boxes, axis=2)
detection_scores_with_background = self._score_conversion_fn(
class_predictions_with_background)
detection_scores = tf.identity(detection_scores_with_background,
'raw_box_scores')
if self._add_background_class or self._explicit_background_class:
detection_scores = tf.slice(detection_scores, [0, 0, 1], [-1, -1, -1])
additional_fields = None
batch_size = (
shape_utils.combined_static_and_dynamic_shape(preprocessed_images)[0])
if 'feature_maps' in prediction_dict:
feature_map_list = []
for feature_map in prediction_dict['feature_maps']:
feature_map_list.append(tf.reshape(feature_map, [batch_size, -1]))
box_features = tf.concat(feature_map_list, 1)
box_features = tf.identity(box_features, 'raw_box_features')
additional_fields = {
'multiclass_scores': detection_scores_with_background
}
if self._anchors is not None:
num_boxes = (self._anchors.num_boxes_static() or
self._anchors.num_boxes())
anchor_indices = tf.range(num_boxes)
batch_anchor_indices = tf.tile(
tf.expand_dims(anchor_indices, 0), [batch_size, 1])
# All additional fields need to be float.
additional_fields.update({
'anchor_indices': tf.cast(batch_anchor_indices, tf.float32),
})
if detection_keypoints is not None:
detection_keypoints = tf.identity(
detection_keypoints, 'raw_keypoint_locations')
additional_fields[fields.BoxListFields.keypoints] = detection_keypoints
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields,
num_detections) = self._non_max_suppression_fn(
detection_boxes,
detection_scores,
clip_window=self._compute_clip_window(
preprocessed_images, true_image_shapes),
additional_fields=additional_fields,
masks=prediction_dict.get('mask_predictions'))
detection_dict = {
fields.DetectionResultFields.detection_boxes:
nmsed_boxes,
fields.DetectionResultFields.detection_scores:
nmsed_scores,
fields.DetectionResultFields.detection_classes:
nmsed_classes,
fields.DetectionResultFields.num_detections:
tf.cast(num_detections, dtype=tf.float32),
fields.DetectionResultFields.raw_detection_boxes:
tf.squeeze(detection_boxes, axis=2),
fields.DetectionResultFields.raw_detection_scores:
detection_scores_with_background
}
if (nmsed_additional_fields is not None and
fields.InputDataFields.multiclass_scores in nmsed_additional_fields):
detection_dict[
fields.DetectionResultFields.detection_multiclass_scores] = (
nmsed_additional_fields[
fields.InputDataFields.multiclass_scores])
if (nmsed_additional_fields is not None and
'anchor_indices' in nmsed_additional_fields):
detection_dict.update({
fields.DetectionResultFields.detection_anchor_indices:
tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32),
})
if (nmsed_additional_fields is not None and
fields.BoxListFields.keypoints in nmsed_additional_fields):
detection_dict[fields.DetectionResultFields.detection_keypoints] = (
nmsed_additional_fields[fields.BoxListFields.keypoints])
if nmsed_masks is not None:
detection_dict[
fields.DetectionResultFields.detection_masks] = nmsed_masks
return detection_dict
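  # Illustrative usage sketch (not part of the original file); assumes `model`
  # is a built SSDMetaArch instance and `images` is a float32
  # [batch, height, width, 3] tensor:
  #   preprocessed, true_shapes = model.preprocess(images)
  #   prediction_dict = model.predict(preprocessed, true_shapes)
  #   detections = model.postprocess(prediction_dict, true_shapes)
  #   boxes = detections['detection_boxes']    # [batch, max_detections, 4]
  #   scores = detections['detection_scores']  # [batch, max_detections]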
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
scope: Optional scope name.
Returns:
a dictionary mapping loss keys (`localization_loss` and
`classification_loss`) to scalar tensors representing corresponding loss
values.
"""
with tf.name_scope(scope, 'Loss', prediction_dict.values()):
keypoints = None
if self.groundtruth_has_field(fields.BoxListFields.keypoints):
keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints)
weights = None
if self.groundtruth_has_field(fields.BoxListFields.weights):
weights = self.groundtruth_lists(fields.BoxListFields.weights)
confidences = None
if self.groundtruth_has_field(fields.BoxListFields.confidences):
confidences = self.groundtruth_lists(fields.BoxListFields.confidences)
(batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match) = self._assign_targets(
self.groundtruth_lists(fields.BoxListFields.boxes),
self.groundtruth_lists(fields.BoxListFields.classes),
keypoints, weights, confidences)
match_list = [matcher.Match(match) for match in tf.unstack(batch_match)]
if self._add_summaries:
self._summarize_target_assignment(
self.groundtruth_lists(fields.BoxListFields.boxes), match_list)
if self._random_example_sampler:
batch_cls_per_anchor_weights = tf.reduce_mean(
batch_cls_weights, axis=-1)
batch_sampled_indicator = tf.cast(
shape_utils.static_or_dynamic_map_fn(
self._minibatch_subsample_fn,
[batch_cls_targets, batch_cls_per_anchor_weights],
dtype=tf.bool,
parallel_iterations=self._parallel_iterations,
back_prop=True), dtype=tf.float32)
batch_reg_weights = tf.multiply(batch_sampled_indicator,
batch_reg_weights)
batch_cls_weights = tf.multiply(
tf.expand_dims(batch_sampled_indicator, -1),
batch_cls_weights)
losses_mask = None
if self.groundtruth_has_field(fields.InputDataFields.is_annotated):
losses_mask = tf.stack(self.groundtruth_lists(
fields.InputDataFields.is_annotated))
location_losses = self._localization_loss(
prediction_dict['box_encodings'],
batch_reg_targets,
ignore_nan_targets=True,
weights=batch_reg_weights,
losses_mask=losses_mask)
cls_losses = self._classification_loss(
prediction_dict['class_predictions_with_background'],
batch_cls_targets,
weights=batch_cls_weights,
losses_mask=losses_mask)
if self._expected_loss_weights_fn:
        # Need to compute losses for assigned targets against the
        # unmatched_class_label as well as their assigned targets. The
        # simplest approach (though wasteful) is to calculate all losses
        # twice.
batch_size, num_anchors, num_classes = batch_cls_targets.get_shape()
unmatched_targets = tf.ones([batch_size, num_anchors, 1
]) * self._unmatched_class_label
unmatched_cls_losses = self._classification_loss(
prediction_dict['class_predictions_with_background'],
unmatched_targets,
weights=batch_cls_weights,
losses_mask=losses_mask)
if cls_losses.get_shape().ndims == 3:
batch_size, num_anchors, num_classes = cls_losses.get_shape()
cls_losses = tf.reshape(cls_losses, [batch_size, -1])
unmatched_cls_losses = tf.reshape(unmatched_cls_losses,
[batch_size, -1])
batch_cls_targets = tf.reshape(
batch_cls_targets, [batch_size, num_anchors * num_classes, -1])
batch_cls_targets = tf.concat(
[1 - batch_cls_targets, batch_cls_targets], axis=-1)
location_losses = tf.tile(location_losses, [1, num_classes])
foreground_weights, background_weights = (
self._expected_loss_weights_fn(batch_cls_targets))
cls_losses = (
foreground_weights * cls_losses +
background_weights * unmatched_cls_losses)
location_losses *= foreground_weights
classification_loss = tf.reduce_sum(cls_losses)
localization_loss = tf.reduce_sum(location_losses)
elif self._hard_example_miner:
cls_losses = ops.reduce_sum_trailing_dimensions(cls_losses, ndims=2)
(localization_loss, classification_loss) = self._apply_hard_mining(
location_losses, cls_losses, prediction_dict, match_list)
if self._add_summaries:
self._hard_example_miner.summarize()
else:
cls_losses = ops.reduce_sum_trailing_dimensions(cls_losses, ndims=2)
localization_loss = tf.reduce_sum(location_losses)
classification_loss = tf.reduce_sum(cls_losses)
# Optionally normalize by number of positive matches
normalizer = tf.constant(1.0, dtype=tf.float32)
if self._normalize_loss_by_num_matches:
normalizer = tf.maximum(tf.cast(tf.reduce_sum(batch_reg_weights),
dtype=tf.float32),
1.0)
localization_loss_normalizer = normalizer
if self._normalize_loc_loss_by_codesize:
localization_loss_normalizer *= self._box_coder.code_size
localization_loss = tf.multiply((self._localization_loss_weight /
localization_loss_normalizer),
localization_loss,
name='localization_loss')
classification_loss = tf.multiply((self._classification_loss_weight /
normalizer), classification_loss,
name='classification_loss')
loss_dict = {
'Loss/localization_loss': localization_loss,
'Loss/classification_loss': classification_loss
}
return loss_dict
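  # Worked example (illustrative, not part of the original file): with
  # normalize_loss_by_num_matches=True, normalize_loc_loss_by_codesize=True,
  # a summed batch_reg_weights of 10 matched anchors, a box coder code_size of
  # 4 and both loss weights equal to 1.0, the summed localization loss above
  # is divided by 10 * 4 = 40 while the summed classification loss is divided
  # by 10.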
def _minibatch_subsample_fn(self, inputs):
"""Randomly samples anchors for one image.
Args:
inputs: a list of 2 inputs. First one is a tensor of shape [num_anchors,
num_classes] indicating targets assigned to each anchor. Second one
is a tensor of shape [num_anchors] indicating the class weight of each
anchor.
Returns:
batch_sampled_indicator: bool tensor of shape [num_anchors] indicating
whether the anchor should be selected for loss computation.
"""
cls_targets, cls_weights = inputs
if self._add_background_class:
# Set background_class bits to 0 so that the positives_indicator
# computation would not consider background class.
background_class = tf.zeros_like(tf.slice(cls_targets, [0, 0], [-1, 1]))
regular_class = tf.slice(cls_targets, [0, 1], [-1, -1])
cls_targets = tf.concat([background_class, regular_class], 1)
positives_indicator = tf.reduce_sum(cls_targets, axis=1)
return self._random_example_sampler.subsample(
tf.cast(cls_weights, tf.bool),
batch_size=None,
labels=tf.cast(positives_indicator, tf.bool))
def _summarize_anchor_classification_loss(self, class_ids, cls_losses):
positive_indices = tf.where(tf.greater(class_ids, 0))
positive_anchor_cls_loss = tf.squeeze(
tf.gather(cls_losses, positive_indices), axis=1)
visualization_utils.add_cdf_image_summary(positive_anchor_cls_loss,
'PositiveAnchorLossCDF')
negative_indices = tf.where(tf.equal(class_ids, 0))
negative_anchor_cls_loss = tf.squeeze(
tf.gather(cls_losses, negative_indices), axis=1)
visualization_utils.add_cdf_image_summary(negative_anchor_cls_loss,
'NegativeAnchorLossCDF')
def _assign_targets(self,
groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_keypoints_list=None,
groundtruth_weights_list=None,
groundtruth_confidences_list=None):
"""Assign groundtruth targets.
Adds a background class to each one-hot encoding of groundtruth classes
and uses target assigner to obtain regression and classification targets.
Args:
groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max]
format and assumed to be normalized and clipped
relative to the image window with y_min <= y_max and x_min <= x_max.
groundtruth_classes_list: a list of 2-D one-hot (or k-hot) tensors of
shape [num_boxes, num_classes] containing the class targets with the 0th
index assumed to map to the first non-background class.
groundtruth_keypoints_list: (optional) a list of 3-D tensors of shape
[num_boxes, num_keypoints, 2]
groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape
[num_boxes, num_classes] containing class confidences for
groundtruth boxes.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors], containing
result of anchor groundtruth matching. Each position in the tensor
indicates an anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth
match[x, i].
        (2) if match[x, i]=-1, anchor i is marked to be background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background
and does not have sufficient overlap to call it a foreground.
"""
groundtruth_boxlists = [
box_list.BoxList(boxes) for boxes in groundtruth_boxes_list
]
train_using_confidences = (self._is_training and
self._use_confidences_as_targets)
if self._add_background_class:
groundtruth_classes_with_background_list = [
tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT')
for one_hot_encoding in groundtruth_classes_list
]
if train_using_confidences:
groundtruth_confidences_with_background_list = [
tf.pad(groundtruth_confidences, [[0, 0], [1, 0]], mode='CONSTANT')
for groundtruth_confidences in groundtruth_confidences_list
]
else:
groundtruth_classes_with_background_list = groundtruth_classes_list
if groundtruth_keypoints_list is not None:
for boxlist, keypoints in zip(
groundtruth_boxlists, groundtruth_keypoints_list):
boxlist.add_field(fields.BoxListFields.keypoints, keypoints)
if train_using_confidences:
return target_assigner.batch_assign_confidences(
self._target_assigner,
self.anchors,
groundtruth_boxlists,
groundtruth_confidences_with_background_list,
groundtruth_weights_list,
self._unmatched_class_label,
self._add_background_class,
self._implicit_example_weight)
else:
return target_assigner.batch_assign_targets(
self._target_assigner,
self.anchors,
groundtruth_boxlists,
groundtruth_classes_with_background_list,
self._unmatched_class_label,
groundtruth_weights_list)
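  # Illustrative example (not part of the original file): a returned match row
  # of [1, -1, 0, -2] means anchor 0 matched groundtruth box 1, anchor 1 was
  # assigned background, anchor 2 matched groundtruth box 0, and anchor 3 is
  # ignored because it is neither background nor has enough overlap to count
  # as foreground.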
def _summarize_target_assignment(self, groundtruth_boxes_list, match_list):
"""Creates tensorflow summaries for the input boxes and anchors.
This function creates four summaries corresponding to the average
number (over images in a batch) of (1) groundtruth boxes, (2) anchors
marked as positive, (3) anchors marked as negative, and (4) anchors marked
as ignored.
Args:
groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
containing corners of the groundtruth boxes.
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
"""
# TODO(rathodv): Add a test for these summaries.
try:
# TODO(kaftan): Integrate these summaries into the v2 style loops
with tf.compat.v2.init_scope():
if tf.compat.v2.executing_eagerly():
return
except AttributeError:
pass
avg_num_gt_boxes = tf.reduce_mean(
tf.cast(
tf.stack([tf.shape(x)[0] for x in groundtruth_boxes_list]),
dtype=tf.float32))
avg_num_matched_gt_boxes = tf.reduce_mean(
tf.cast(
tf.stack([match.num_matched_rows() for match in match_list]),
dtype=tf.float32))
avg_pos_anchors = tf.reduce_mean(
tf.cast(
tf.stack([match.num_matched_columns() for match in match_list]),
dtype=tf.float32))
avg_neg_anchors = tf.reduce_mean(
tf.cast(
tf.stack([match.num_unmatched_columns() for match in match_list]),
dtype=tf.float32))
avg_ignored_anchors = tf.reduce_mean(
tf.cast(
tf.stack([match.num_ignored_columns() for match in match_list]),
dtype=tf.float32))
tf.summary.scalar('AvgNumGroundtruthBoxesPerImage',
avg_num_gt_boxes,
family='TargetAssignment')
tf.summary.scalar('AvgNumGroundtruthBoxesMatchedPerImage',
avg_num_matched_gt_boxes,
family='TargetAssignment')
tf.summary.scalar('AvgNumPositiveAnchorsPerImage',
avg_pos_anchors,
family='TargetAssignment')
tf.summary.scalar('AvgNumNegativeAnchorsPerImage',
avg_neg_anchors,
family='TargetAssignment')
tf.summary.scalar('AvgNumIgnoredAnchorsPerImage',
avg_ignored_anchors,
family='TargetAssignment')
def _apply_hard_mining(self, location_losses, cls_losses, prediction_dict,
match_list):
"""Applies hard mining to anchorwise losses.
Args:
location_losses: Float tensor of shape [batch_size, num_anchors]
representing anchorwise location losses.
cls_losses: Float tensor of shape [batch_size, num_anchors]
representing anchorwise classification losses.
      prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
3) anchors: (optional) 2-D float tensor of shape [num_anchors, 4].
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
Returns:
mined_location_loss: a float scalar with sum of localization losses from
selected hard examples.
mined_cls_loss: a float scalar with sum of classification losses from
selected hard examples.
"""
class_predictions = prediction_dict['class_predictions_with_background']
if self._add_background_class:
class_predictions = tf.slice(class_predictions, [0, 0, 1], [-1, -1, -1])
if 'anchors' not in prediction_dict:
prediction_dict['anchors'] = self.anchors.get()
decoded_boxes, _ = self._batch_decode(prediction_dict['box_encodings'],
prediction_dict['anchors'])
decoded_box_tensors_list = tf.unstack(decoded_boxes)
class_prediction_list = tf.unstack(class_predictions)
decoded_boxlist_list = []
for box_location, box_score in zip(decoded_box_tensors_list,
class_prediction_list):
decoded_boxlist = box_list.BoxList(box_location)
decoded_boxlist.add_field('scores', box_score)
decoded_boxlist_list.append(decoded_boxlist)
return self._hard_example_miner(
location_losses=location_losses,
cls_losses=cls_losses,
decoded_boxlist_list=decoded_boxlist_list,
match_list=match_list)
def _batch_decode(self, box_encodings, anchors):
"""Decodes a batch of box encodings with respect to the anchors.
Args:
box_encodings: A float32 tensor of shape
[batch_size, num_anchors, box_code_size] containing box encodings.
anchors: A tensor of shape [num_anchors, 4].
Returns:
decoded_boxes: A float32 tensor of shape
[batch_size, num_anchors, 4] containing the decoded boxes.
decoded_keypoints: A float32 tensor of shape
[batch_size, num_anchors, num_keypoints, 2] containing the decoded
keypoints if present in the input `box_encodings`, None otherwise.
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(
box_encodings)
batch_size = combined_shape[0]
tiled_anchor_boxes = tf.tile(tf.expand_dims(anchors, 0), [batch_size, 1, 1])
tiled_anchors_boxlist = box_list.BoxList(
tf.reshape(tiled_anchor_boxes, [-1, 4]))
decoded_boxes = self._box_coder.decode(
tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
tiled_anchors_boxlist)
decoded_keypoints = None
if decoded_boxes.has_field(fields.BoxListFields.keypoints):
decoded_keypoints = decoded_boxes.get_field(
fields.BoxListFields.keypoints)
num_keypoints = decoded_keypoints.get_shape()[1]
decoded_keypoints = tf.reshape(
decoded_keypoints,
tf.stack([combined_shape[0], combined_shape[1], num_keypoints, 2]))
decoded_boxes = tf.reshape(decoded_boxes.get(), tf.stack(
[combined_shape[0], combined_shape[1], 4]))
return decoded_boxes, decoded_keypoints
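  # Illustrative shape sketch (not part of the original file): with a batch of
  # 8 images, 1917 anchors and a 4-dimensional box code, `box_encodings` is
  # [8, 1917, 4] and the decoded boxes come back as [8, 1917, 4] in
  # [ymin, xmin, ymax, xmax] order; keypoints, if the box coder produces them,
  # come back as [8, 1917, num_keypoints, 2].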
def regularization_losses(self):
"""Returns a list of regularization losses for this model.
Returns a list of regularization losses for this model that the estimator
needs to use during training/optimization.
Returns:
A list of regularization loss tensors.
"""
losses = []
slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# Copy the slim losses to avoid modifying the collection
if slim_losses:
losses.extend(slim_losses)
if self._box_predictor.is_keras_model:
losses.extend(self._box_predictor.losses)
if self._feature_extractor.is_keras_model:
losses.extend(self._feature_extractor.losses)
return losses
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
"""Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
load_all_detection_checkpoint_vars: whether to load all variables (when
`fine_tune_checkpoint_type` is `detection`). If False, only variables
within the feature extractor scope are included. Default False.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
Raises:
ValueError: if fine_tune_checkpoint_type is neither `classification`
nor `detection`.
"""
if fine_tune_checkpoint_type == 'classification':
return self._feature_extractor.restore_from_classification_checkpoint_fn(
self._extract_features_scope)
elif fine_tune_checkpoint_type == 'detection':
variables_to_restore = {}
for variable in variables_helper.get_global_variables_safely():
var_name = variable.op.name
if load_all_detection_checkpoint_vars:
variables_to_restore[var_name] = variable
else:
if var_name.startswith(self._extract_features_scope):
variables_to_restore[var_name] = variable
return variables_to_restore
else:
raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
fine_tune_checkpoint_type))
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of Trackable objects to load from a foreign checkpoint.
Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
    should have the same parameters as this detection model with the exception
    of the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running Tensorflow 2, whereas restore_map (above) is intended
to be used to restore Slim-based models when running Tensorflow 1.x.
Args:
      fine_tune_checkpoint_type: A string indicating the subset of variables
to load. Valid values: `detection`, `classification`, `full`. Default
`detection`.
An SSD checkpoint has three parts:
1) Classification Network (like ResNet)
2) DeConv layers (for FPN)
3) Box/Class prediction parameters
The parameters will be loaded using the following strategy:
`classification` - will load #1
`detection` - will load #1, #2
`full` - will load #1, #2, #3
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
"""
if fine_tune_checkpoint_type == 'classification':
return {
'feature_extractor':
self._feature_extractor.classification_backbone
}
elif fine_tune_checkpoint_type == 'detection':
fake_model = tf.train.Checkpoint(
_feature_extractor=self._feature_extractor)
return {'model': fake_model}
elif fine_tune_checkpoint_type == 'full':
return {'model': self}
else:
raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
fine_tune_checkpoint_type))
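  # Illustrative usage sketch (not part of the original file): the returned
  # mapping is typically wrapped in a tf.train.Checkpoint to restore weights
  # from a fine-tune checkpoint, e.g.
  #   restore_map = detection_model.restore_from_objects('detection')
  #   ckpt = tf.train.Checkpoint(**restore_map)
  #   ckpt.restore('/path/to/checkpoint').expect_partial()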
def updates(self):
"""Returns a list of update operators for this model.
Returns a list of update operators for this model that must be executed at
each training step. The estimator's train op needs to have a control
dependency on these updates.
Returns:
A list of update operators.
"""
update_ops = []
slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# Copy the slim ops to avoid modifying the collection
if slim_update_ops:
update_ops.extend(slim_update_ops)
if self._box_predictor.is_keras_model:
update_ops.extend(self._box_predictor.get_updates_for(None))
update_ops.extend(self._box_predictor.get_updates_for(
self._box_predictor.inputs))
if self._feature_extractor.is_keras_model:
update_ops.extend(self._feature_extractor.get_updates_for(None))
update_ops.extend(self._feature_extractor.get_updates_for(
self._feature_extractor.inputs))
return update_ops
| 61,428 | 43.871439 | 80 | py |
models | models-master/research/object_detection/meta_architectures/context_rcnn_lib_tf2.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library functions for Context R-CNN."""
import tensorflow as tf
from object_detection.core import freezable_batch_norm
# The negative value used in padding the invalid weights.
_NEGATIVE_PADDING_VALUE = -100000
class ContextProjection(tf.keras.layers.Layer):
"""Custom layer to do batch normalization and projection."""
def __init__(self, projection_dimension, **kwargs):
self.batch_norm = freezable_batch_norm.FreezableBatchNorm(
epsilon=0.001,
center=True,
scale=True,
momentum=0.97,
trainable=True)
self.projection = tf.keras.layers.Dense(units=projection_dimension,
use_bias=True)
self.projection_dimension = projection_dimension
super(ContextProjection, self).__init__(**kwargs)
def build(self, input_shape):
self.projection.build(input_shape)
self.batch_norm.build(input_shape[:1] + [self.projection_dimension])
def call(self, input_features, is_training=False):
return tf.nn.relu6(self.batch_norm(self.projection(input_features),
is_training))
class AttentionBlock(tf.keras.layers.Layer):
"""Custom layer to perform all attention."""
def __init__(self, bottleneck_dimension, attention_temperature,
output_dimension=None, is_training=False,
name='AttentionBlock', max_num_proposals=100,
**kwargs):
"""Constructs an attention block.
Args:
      bottleneck_dimension: An int32 Tensor representing the bottleneck
        dimension for intermediate projections.
attention_temperature: A float Tensor. It controls the temperature of the
softmax for weights calculation. The formula for calculation as follows:
weights = exp(weights / temperature) / sum(exp(weights / temperature))
      output_dimension: An int32 Tensor representing the last dimension of the
output feature.
is_training: A boolean Tensor (affecting batch normalization).
name: A string describing what to name the variables in this block.
max_num_proposals: The number of box proposals for each image
**kwargs: Additional keyword arguments.
"""
self._key_proj = ContextProjection(bottleneck_dimension)
self._val_proj = ContextProjection(bottleneck_dimension)
self._query_proj = ContextProjection(bottleneck_dimension)
self._feature_proj = None
self._attention_temperature = attention_temperature
self._bottleneck_dimension = bottleneck_dimension
self._is_training = is_training
self._output_dimension = output_dimension
self._max_num_proposals = max_num_proposals
if self._output_dimension:
self._feature_proj = ContextProjection(self._output_dimension)
super(AttentionBlock, self).__init__(name=name, **kwargs)
def build(self, input_shapes):
"""Finishes building the attention block.
Args:
input_shapes: the shape of the primary input box features.
"""
if not self._feature_proj:
self._output_dimension = input_shapes[-1]
self._feature_proj = ContextProjection(self._output_dimension)
def call(self, box_features, context_features, valid_context_size,
num_proposals):
"""Handles a call by performing attention.
Args:
box_features: A float Tensor of shape [batch_size * input_size, height,
width, num_input_features].
context_features: A float Tensor of shape [batch_size, context_size,
num_context_features].
      valid_context_size: An int32 Tensor of shape [batch_size].
num_proposals: A [batch_size] int32 Tensor specifying the number of valid
proposals per image in the batch.
Returns:
A float Tensor with shape [batch_size, input_size, num_input_features]
containing output features after attention with context features.
"""
_, context_size, _ = context_features.shape
keys_values_valid_mask = compute_valid_mask(
valid_context_size, context_size)
total_proposals, height, width, channels = box_features.shape
batch_size = total_proposals // self._max_num_proposals
box_features = tf.reshape(
box_features,
[batch_size,
self._max_num_proposals,
height,
width,
channels])
# Average pools over height and width dimension so that the shape of
# box_features becomes [batch_size, max_num_proposals, channels].
box_features = tf.reduce_mean(box_features, [2, 3])
queries_valid_mask = compute_valid_mask(num_proposals,
box_features.shape[1])
queries = project_features(
box_features, self._bottleneck_dimension, self._is_training,
self._query_proj, normalize=True)
keys = project_features(
context_features, self._bottleneck_dimension, self._is_training,
self._key_proj, normalize=True)
values = project_features(
context_features, self._bottleneck_dimension, self._is_training,
self._val_proj, normalize=True)
# masking out any keys which are padding
keys *= tf.cast(keys_values_valid_mask[..., tf.newaxis], keys.dtype)
queries *= tf.cast(queries_valid_mask[..., tf.newaxis], queries.dtype)
weights = tf.matmul(queries, keys, transpose_b=True)
weights, values = filter_weight_value(weights, values,
keys_values_valid_mask)
weights = tf.nn.softmax(weights / self._attention_temperature)
features = tf.matmul(weights, values)
output_features = project_features(
features, self._output_dimension, self._is_training,
self._feature_proj, normalize=False)
output_features = output_features[:, :, tf.newaxis, tf.newaxis, :]
return output_features
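  # Illustrative shape walk-through (not part of the original file): with
  # batch_size=2, max_num_proposals=100, 7x7 box features of depth 1024 and 20
  # context features per image, `box_features` enters as [200, 7, 7, 1024], is
  # pooled to [2, 100, 1024], queries/keys/values are projected to
  # bottleneck_dimension, the attention weights have shape [2, 100, 20], and
  # the returned tensor has shape [2, 100, 1, 1, output_dimension].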
def filter_weight_value(weights, values, valid_mask):
"""Filters weights and values based on valid_mask.
_NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights to
avoid their contribution in softmax. 0 will be set for the invalid elements in
the values.
Args:
weights: A float Tensor of shape [batch_size, input_size, context_size].
values: A float Tensor of shape [batch_size, context_size,
projected_dimension].
valid_mask: A boolean Tensor of shape [batch_size, context_size]. True means
valid and False means invalid.
Returns:
weights: A float Tensor of shape [batch_size, input_size, context_size].
values: A float Tensor of shape [batch_size, context_size,
projected_dimension].
Raises:
    ValueError: If the shapes of the inputs do not match.
"""
w_batch_size, _, w_context_size = weights.shape
v_batch_size, v_context_size, _ = values.shape
m_batch_size, m_context_size = valid_mask.shape
if w_batch_size != v_batch_size or v_batch_size != m_batch_size:
raise ValueError('Please make sure the first dimension of the input'
' tensors are the same.')
if w_context_size != v_context_size:
raise ValueError('Please make sure the third dimension of weights matches'
' the second dimension of values.')
if w_context_size != m_context_size:
raise ValueError('Please make sure the third dimension of the weights'
' matches the second dimension of the valid_mask.')
valid_mask = valid_mask[..., tf.newaxis]
# Force the invalid weights to be very negative so it won't contribute to
# the softmax.
weights += tf.transpose(
tf.cast(tf.math.logical_not(valid_mask), weights.dtype) *
_NEGATIVE_PADDING_VALUE,
perm=[0, 2, 1])
# Force the invalid values to be 0.
values *= tf.cast(valid_mask, values.dtype)
return weights, values
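# Illustrative example (not part of the original file): with weights of shape
# [1, 2, 3], values of shape [1, 3, 4] and valid_mask = [[True, True, False]],
# the last column of `weights` is pushed down by roughly 1e5 (so it vanishes
# after a softmax) and the last row of `values` is zeroed out.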
def project_features(features, bottleneck_dimension, is_training,
layer, normalize=True):
"""Projects features to another feature space.
Args:
features: A float Tensor of shape [batch_size, features_size,
num_features].
    bottleneck_dimension: An int32 Tensor.
is_training: A boolean Tensor (affecting batch normalization).
layer: Contains a custom layer specific to the particular operation
      being performed (key, value, query, features).
normalize: A boolean Tensor. If true, the output features will be l2
normalized on the last dimension.
Returns:
A float Tensor of shape [batch, features_size, projection_dimension].
"""
shape_arr = features.shape
batch_size, _, num_features = shape_arr
features = tf.reshape(features, [-1, num_features])
projected_features = layer(features, is_training)
projected_features = tf.reshape(projected_features,
[batch_size, -1, bottleneck_dimension])
if normalize:
projected_features = tf.keras.backend.l2_normalize(projected_features,
axis=-1)
return projected_features
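# Illustrative example (not part of the original file): projecting features of
# shape [2, 100, 1024] with bottleneck_dimension=128 returns a [2, 100, 128]
# tensor; with normalize=True each 128-dimensional vector is additionally
# l2-normalized.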
def compute_valid_mask(num_valid_elements, num_elements):
"""Computes mask of valid entries within padded context feature.
Args:
    num_valid_elements: An int32 Tensor of shape [batch_size].
num_elements: An int32 Tensor.
Returns:
A boolean Tensor of the shape [batch_size, num_elements]. True means
valid and False means invalid.
"""
batch_size = num_valid_elements.shape[0]
element_idxs = tf.range(num_elements, dtype=tf.int32)
batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1])
num_valid_elements = num_valid_elements[..., tf.newaxis]
valid_mask = tf.less(batch_element_idxs, num_valid_elements)
return valid_mask
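# The demo below is an illustrative addition (not part of the original
# module); it exercises the helpers above on tiny hand-made tensors and only
# runs when the file is executed directly.
if __name__ == '__main__':
  # Two batch elements with 1 and 3 valid context entries out of 4; prints
  # [[True, False, False, False], [True, True, True, False]].
  demo_mask = compute_valid_mask(tf.constant([1, 3]), 4)
  print(demo_mask.numpy())
  # Mask out the padded context entries before the attention softmax.
  demo_weights, demo_values = filter_weight_value(
      tf.ones([2, 5, 4]), tf.ones([2, 4, 8]), demo_mask)
  print(demo_weights.shape, demo_values.shape)  # (2, 5, 4) (2, 4, 8)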
| 10,255 | 37.848485 | 80 | py |
models | models-master/research/object_detection/meta_architectures/ssd_meta_arch_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.meta_architectures.ssd_meta_arch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.meta_architectures import ssd_meta_arch_test_lib
from object_detection.protos import model_pb2
from object_detection.utils import test_utils
# pylint: disable=g-import-not-at-top
try:
import tf_slim as slim
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
keras = tf.keras.layers
class SsdMetaArchTest(ssd_meta_arch_test_lib.SSDMetaArchTestBase,
parameterized.TestCase):
def _create_model(
self,
apply_hard_mining=True,
normalize_loc_loss_by_codesize=False,
add_background_class=True,
random_example_sampling=False,
expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE,
min_num_negative_samples=1,
desired_negative_sampling_ratio=3,
predict_mask=False,
use_static_shapes=False,
nms_max_size_per_class=5,
calibration_mapping_value=None,
return_raw_detections_during_predict=False):
return super(SsdMetaArchTest, self)._create_model(
model_fn=ssd_meta_arch.SSDMetaArch,
apply_hard_mining=apply_hard_mining,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
add_background_class=add_background_class,
random_example_sampling=random_example_sampling,
expected_loss_weights=expected_loss_weights,
min_num_negative_samples=min_num_negative_samples,
desired_negative_sampling_ratio=desired_negative_sampling_ratio,
predict_mask=predict_mask,
use_static_shapes=use_static_shapes,
nms_max_size_per_class=nms_max_size_per_class,
calibration_mapping_value=calibration_mapping_value,
return_raw_detections_during_predict=(
return_raw_detections_during_predict))
def test_preprocess_preserves_shapes_with_dynamic_input_image(self):
width = tf.random.uniform([], minval=5, maxval=10, dtype=tf.int32)
batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32)
shape = tf.stack([batch, 5, width, 3])
image = tf.random.uniform(shape)
model, _, _, _ = self._create_model()
preprocessed_inputs, _ = model.preprocess(image)
self.assertTrue(
preprocessed_inputs.shape.is_compatible_with([None, 5, None, 3]))
def test_preprocess_preserves_shape_with_static_input_image(self):
image = tf.random.uniform([2, 3, 3, 3])
model, _, _, _ = self._create_model()
preprocessed_inputs, _ = model.preprocess(image)
self.assertTrue(preprocessed_inputs.shape.is_compatible_with([2, 3, 3, 3]))
def test_predict_result_shapes_on_image_with_dynamic_shape(self):
with test_utils.GraphContextOrNone() as g:
model, num_classes, num_anchors, code_size = self._create_model()
def graph_fn():
size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32)
batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32)
shape = tf.stack([batch, size, size, 3])
image = tf.random.uniform(shape)
prediction_dict = model.predict(image, true_image_shapes=None)
self.assertIn('box_encodings', prediction_dict)
self.assertIn('class_predictions_with_background', prediction_dict)
self.assertIn('feature_maps', prediction_dict)
self.assertIn('anchors', prediction_dict)
self.assertIn('final_anchors', prediction_dict)
return (prediction_dict['box_encodings'],
prediction_dict['final_anchors'],
prediction_dict['class_predictions_with_background'],
tf.constant(num_anchors), batch)
(box_encodings_out, final_anchors, class_predictions_with_background,
num_anchors, batch_size) = self.execute_cpu(graph_fn, [], graph=g)
self.assertAllEqual(box_encodings_out.shape,
(batch_size, num_anchors, code_size))
self.assertAllEqual(final_anchors.shape,
(batch_size, num_anchors, code_size))
self.assertAllEqual(
class_predictions_with_background.shape,
(batch_size, num_anchors, num_classes + 1))
def test_predict_result_shapes_on_image_with_static_shape(self):
with test_utils.GraphContextOrNone() as g:
model, num_classes, num_anchors, code_size = self._create_model()
def graph_fn(input_image):
predictions = model.predict(input_image, true_image_shapes=None)
return (predictions['box_encodings'],
predictions['class_predictions_with_background'],
predictions['final_anchors'])
batch_size = 3
image_size = 2
channels = 3
input_image = np.random.rand(batch_size, image_size, image_size,
channels).astype(np.float32)
expected_box_encodings_shape = (batch_size, num_anchors, code_size)
expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1)
final_anchors_shape = (batch_size, num_anchors, 4)
(box_encodings, class_predictions, final_anchors) = self.execute(
graph_fn, [input_image], graph=g)
self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape)
self.assertAllEqual(class_predictions.shape,
expected_class_predictions_shape)
self.assertAllEqual(final_anchors.shape, final_anchors_shape)
def test_predict_with_raw_output_fields(self):
with test_utils.GraphContextOrNone() as g:
model, num_classes, num_anchors, code_size = self._create_model(
return_raw_detections_during_predict=True)
def graph_fn(input_image):
predictions = model.predict(input_image, true_image_shapes=None)
return (predictions['box_encodings'],
predictions['class_predictions_with_background'],
predictions['final_anchors'],
predictions['raw_detection_boxes'],
predictions['raw_detection_feature_map_indices'])
batch_size = 3
image_size = 2
channels = 3
input_image = np.random.rand(batch_size, image_size, image_size,
channels).astype(np.float32)
expected_box_encodings_shape = (batch_size, num_anchors, code_size)
expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1)
final_anchors_shape = (batch_size, num_anchors, 4)
expected_raw_detection_boxes_shape = (batch_size, num_anchors, 4)
(box_encodings, class_predictions, final_anchors, raw_detection_boxes,
raw_detection_feature_map_indices) = self.execute(
graph_fn, [input_image], graph=g)
self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape)
self.assertAllEqual(class_predictions.shape,
expected_class_predictions_shape)
self.assertAllEqual(final_anchors.shape, final_anchors_shape)
self.assertAllEqual(raw_detection_boxes.shape,
expected_raw_detection_boxes_shape)
self.assertAllEqual(raw_detection_feature_map_indices,
np.zeros((batch_size, num_anchors)))
def test_raw_detection_boxes_agree_predict_postprocess(self):
with test_utils.GraphContextOrNone() as g:
model, _, _, _ = self._create_model(
return_raw_detections_during_predict=True)
def graph_fn():
size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32)
batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32)
shape = tf.stack([batch, size, size, 3])
image = tf.random.uniform(shape)
preprocessed_inputs, true_image_shapes = model.preprocess(
image)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
raw_detection_boxes_predict = prediction_dict['raw_detection_boxes']
detections = model.postprocess(prediction_dict, true_image_shapes)
raw_detection_boxes_postprocess = detections['raw_detection_boxes']
return raw_detection_boxes_predict, raw_detection_boxes_postprocess
(raw_detection_boxes_predict_out,
raw_detection_boxes_postprocess_out) = self.execute_cpu(graph_fn, [],
graph=g)
self.assertAllEqual(raw_detection_boxes_predict_out,
raw_detection_boxes_postprocess_out)
def test_postprocess_results_are_correct(self):
with test_utils.GraphContextOrNone() as g:
model, _, _, _ = self._create_model()
def graph_fn():
size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32)
batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32)
shape = tf.stack([batch, size, size, 3])
image = tf.random.uniform(shape)
preprocessed_inputs, true_image_shapes = model.preprocess(
image)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
return [
batch, detections['detection_boxes'], detections['detection_scores'],
detections['detection_classes'],
detections['detection_multiclass_scores'],
detections['num_detections'], detections['raw_detection_boxes'],
detections['raw_detection_scores'],
detections['detection_anchor_indices']
]
expected_boxes = [
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]
], # padding
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]
]
] # padding
expected_scores = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
expected_multiclass_scores = [[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]]
expected_classes = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
expected_num_detections = np.array([3, 3])
expected_raw_detection_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]],
[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]]]
expected_raw_detection_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0], [0, 0]]]
expected_detection_anchor_indices = [[0, 1, 2], [0, 1, 2]]
(batch, detection_boxes, detection_scores, detection_classes,
detection_multiclass_scores, num_detections, raw_detection_boxes,
raw_detection_scores, detection_anchor_indices) = self.execute_cpu(
graph_fn, [], graph=g)
for image_idx in range(batch):
self.assertTrue(
test_utils.first_rows_close_as_set(
detection_boxes[image_idx].tolist(), expected_boxes[image_idx]))
self.assertSameElements(detection_anchor_indices[image_idx],
expected_detection_anchor_indices[image_idx])
self.assertAllClose(detection_scores, expected_scores)
self.assertAllClose(detection_classes, expected_classes)
self.assertAllClose(detection_multiclass_scores, expected_multiclass_scores)
self.assertAllClose(num_detections, expected_num_detections)
self.assertAllEqual(raw_detection_boxes, expected_raw_detection_boxes)
self.assertAllEqual(raw_detection_scores,
expected_raw_detection_scores)
def test_postprocess_results_are_correct_static(self):
with test_utils.GraphContextOrNone() as g:
model, _, _, _ = self._create_model(use_static_shapes=True,
nms_max_size_per_class=4)
def graph_fn(input_image):
preprocessed_inputs, true_image_shapes = model.preprocess(input_image)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
return (detections['detection_boxes'], detections['detection_scores'],
detections['detection_classes'], detections['num_detections'],
detections['detection_multiclass_scores'])
expected_boxes = [
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0]
], # padding
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0]
]
] # padding
expected_scores = [[0, 0, 0, 0], [0, 0, 0, 0]]
expected_multiclass_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0], [0, 0]]]
expected_classes = [[0, 0, 0, 0], [0, 0, 0, 0]]
expected_num_detections = np.array([3, 3])
batch_size = 2
image_size = 2
channels = 3
input_image = np.random.rand(batch_size, image_size, image_size,
channels).astype(np.float32)
(detection_boxes, detection_scores, detection_classes,
num_detections, detection_multiclass_scores) = self.execute(graph_fn,
[input_image],
graph=g)
for image_idx in range(batch_size):
self.assertTrue(test_utils.first_rows_close_as_set(
detection_boxes[image_idx][
0:expected_num_detections[image_idx]].tolist(),
expected_boxes[image_idx][0:expected_num_detections[image_idx]]))
self.assertAllClose(
detection_scores[image_idx][0:expected_num_detections[image_idx]],
expected_scores[image_idx][0:expected_num_detections[image_idx]])
self.assertAllClose(
detection_multiclass_scores[image_idx]
[0:expected_num_detections[image_idx]],
expected_multiclass_scores[image_idx]
[0:expected_num_detections[image_idx]])
self.assertAllClose(
detection_classes[image_idx][0:expected_num_detections[image_idx]],
expected_classes[image_idx][0:expected_num_detections[image_idx]])
self.assertAllClose(num_detections,
expected_num_detections)
def test_postprocess_results_are_correct_with_calibration(self):
with test_utils.GraphContextOrNone() as g:
model, _, _, _ = self._create_model(calibration_mapping_value=0.5)
def graph_fn():
size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32)
batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32)
shape = tf.stack([batch, size, size, 3])
image = tf.random.uniform(shape)
preprocessed_inputs, true_image_shapes = model.preprocess(
image)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
return detections['detection_scores'], detections['raw_detection_scores']
# Calibration mapping value below is set to map all scores to 0.5, except
# for the last two detections in each batch (see expected number of
    # detections below).
expected_scores = [[0.5, 0.5, 0.5, 0., 0.], [0.5, 0.5, 0.5, 0., 0.]]
expected_raw_detection_scores = [
[[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]],
[[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
]
detection_scores, raw_detection_scores = self.execute_cpu(graph_fn, [],
graph=g)
self.assertAllClose(detection_scores, expected_scores)
self.assertAllEqual(raw_detection_scores, expected_raw_detection_scores)
def test_loss_results_are_correct(self):
with test_utils.GraphContextOrNone() as g:
model, num_classes, num_anchors, _ = self._create_model(
apply_hard_mining=False)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
(localization_loss, classification_loss) = self.execute(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
],
graph=g)
expected_localization_loss = 0.0
expected_classification_loss = (batch_size * num_anchors
* (num_classes+1) * np.log(2.0))
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_with_normalize_by_codesize_true(self):
with test_utils.GraphContextOrNone() as g:
model, _, _, _ = self._create_model(
apply_hard_mining=False, normalize_loc_loss_by_codesize=True)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),)
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, 1, 1]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, 1, 1]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.5 / 4
localization_loss = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_classes1,
groundtruth_classes2], graph=g)
self.assertAllClose(localization_loss, expected_localization_loss)
def test_loss_results_are_correct_with_hard_example_mining(self):
with test_utils.GraphContextOrNone() as g:
model, num_classes, num_anchors, _ = self._create_model()
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (batch_size * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss, classification_loss) = self.execute_cpu(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
], graph=g)
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_without_add_background_class(self):
with test_utils.GraphContextOrNone() as g:
model, num_classes, num_anchors, _ = self._create_model(
apply_hard_mining=False, add_background_class=False)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(
preprocessed_tensor, true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (loss_dict['Loss/localization_loss'],
loss_dict['Loss/classification_loss'])
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (
batch_size * num_anchors * num_classes * np.log(2.0))
(localization_loss, classification_loss) = self.execute(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
], graph=g)
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_with_losses_mask(self):
with test_utils.GraphContextOrNone() as g:
model, num_classes, num_anchors, _ = self._create_model(
apply_hard_mining=False)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_boxes3, groundtruth_classes1, groundtruth_classes2,
groundtruth_classes3):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2,
groundtruth_boxes3]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2,
groundtruth_classes3]
is_annotated_list = [tf.constant(True), tf.constant(True),
tf.constant(False)]
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
is_annotated_list=is_annotated_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 3
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes3 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
groundtruth_classes3 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
# Note that we are subtracting 1 from batch_size, since the final image is
# not annotated.
expected_classification_loss = ((batch_size - 1) * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss,
classification_loss) = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_boxes3,
groundtruth_classes1,
groundtruth_classes2,
groundtruth_classes3],
graph=g)
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_restore_map_for_detection_ckpt(self):
# TODO(rathodv): Support TF2.X
if self.is_tf2(): return
model, _, _, _ = self._create_model()
model.predict(tf.constant(np.array([[[[0, 0], [1, 1]], [[1, 0], [0, 1]]]],
dtype=np.float32)),
true_image_shapes=None)
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.session() as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
var_map = model.restore_map(
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn('FeatureExtractor', var)
def test_restore_map_for_classification_ckpt(self):
# TODO(rathodv): Support TF2.X
if self.is_tf2(): return
# Define mock tensorflow classification graph and save variables.
test_graph_classification = tf.Graph()
with test_graph_classification.as_default():
image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3])
with tf.variable_scope('mock_model'):
net = slim.conv2d(image, num_outputs=32, kernel_size=1, scope='layer1')
slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.session(graph=test_graph_classification) as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
# Create tensorflow detection graph and load variables from
# classification checkpoint.
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model, _, _, _ = self._create_model()
inputs_shape = [2, 2, 2, 3]
inputs = tf.cast(tf.random_uniform(
inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32)
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(fine_tune_checkpoint_type='classification')
self.assertNotIn('another_variable', var_map)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
with self.session(graph=test_graph_detection) as sess:
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn(six.ensure_binary('FeatureExtractor'), var)
def test_load_all_det_checkpoint_vars(self):
if self.is_tf2(): return
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model, _, _, _ = self._create_model()
inputs_shape = [2, 2, 2, 3]
inputs = tf.cast(
tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32),
dtype=tf.float32)
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=True)
self.assertIsInstance(var_map, dict)
self.assertIn('another_variable', var_map)
def test_load_checkpoint_vars_tf2(self):
if not self.is_tf2():
self.skipTest('Not running TF2 checkpoint test with TF1.')
model, _, _, _ = self._create_model()
inputs_shape = [2, 2, 2, 3]
inputs = tf.cast(
tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32),
dtype=tf.float32)
model(inputs)
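    # Calling the model once builds its layers and variables so that the
    # restore objects below can enumerate fully-created weights.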
detection_var_names = sorted([
var.name for var in model.restore_from_objects('detection')[
'model']._feature_extractor.weights
])
expected_detection_names = [
'ssd_meta_arch/fake_ssd_keras_feature_extractor/mock_model/layer1/bias:0',
'ssd_meta_arch/fake_ssd_keras_feature_extractor/mock_model/layer1/kernel:0'
]
self.assertEqual(detection_var_names, expected_detection_names)
full_var_names = sorted([
var.name for var in
model.restore_from_objects('full')['model'].weights
])
    expected_full_names = ['box_predictor_var:0'] + expected_detection_names
    self.assertEqual(expected_full_names, full_var_names)
# TODO(vighneshb) Add similar test for classification checkpoint type.
# TODO(vighneshb) Test loading a checkpoint from disk to verify that
# checkpoints are loaded correctly.
def test_loss_results_are_correct_with_random_example_sampling(self):
with test_utils.GraphContextOrNone() as g:
model, num_classes, _, _ = self._create_model(
random_example_sampling=True)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(
preprocessed_tensor, true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
# Among 4 anchors (1 positive, 3 negative) in this test, only 2 anchors are
# selected (1 positive, 1 negative) since random sampler will adjust number
# of negative examples to make sure positive example fraction in the batch
# is 0.5.
expected_classification_loss = (
batch_size * 2 * (num_classes + 1) * np.log(2.0))
(localization_loss, classification_loss) = self.execute_cpu(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
], graph=g)
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
if __name__ == '__main__':
tf.test.main()
| 34,460 | 47.400281 | 104 | py |
models | models-master/research/object_detection/meta_architectures/faster_rcnn_meta_arch_test_lib.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.meta_architectures.faster_rcnn_meta_arch."""
import functools
from absl.testing import parameterized
import numpy as np
import six
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.anchor_generators import multiscale_grid_anchor_generator
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.builders import post_processing_builder
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import losses
from object_detection.core import post_processing
from object_detection.core import target_assigner
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2
from object_detection.protos import post_processing_pb2
from object_detection.utils import spatial_transform_ops as spatial_ops
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
try:
import tf_slim as slim
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
BOX_CODE_SIZE = 4
class FakeFasterRCNNFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Fake feature extractor to use in tests."""
def __init__(self):
super(FakeFasterRCNNFeatureExtractor, self).__init__(
is_training=False,
first_stage_features_stride=32,
reuse_weights=None,
weight_decay=0.0)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def _extract_proposal_features(self, preprocessed_inputs, scope):
with tf.variable_scope('mock_model'):
proposal_features = 0 * slim.conv2d(
preprocessed_inputs, num_outputs=3, kernel_size=1, scope='layer1')
return proposal_features, {}
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
with tf.variable_scope('mock_model'):
return 0 * slim.conv2d(
proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer2')
class FakeFasterRCNNMultiLevelFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Fake feature extractor to use in tests."""
def __init__(self):
super(FakeFasterRCNNMultiLevelFeatureExtractor, self).__init__(
is_training=False,
first_stage_features_stride=32,
reuse_weights=None,
weight_decay=0.0)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def _extract_proposal_features(self, preprocessed_inputs, scope):
with tf.variable_scope('mock_model'):
proposal_features_1 = 0 * slim.conv2d(
preprocessed_inputs, num_outputs=3, kernel_size=3, scope='layer1',
padding='VALID')
proposal_features_2 = 0 * slim.conv2d(
proposal_features_1, num_outputs=3, kernel_size=3, scope='layer2',
padding='VALID')
return [proposal_features_1, proposal_features_2], {}
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
with tf.variable_scope('mock_model'):
return 0 * slim.conv2d(
proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer3')
class FakeFasterRCNNKerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Fake feature extractor to use in tests."""
def __init__(self):
super(FakeFasterRCNNKerasFeatureExtractor, self).__init__(
is_training=False,
first_stage_features_stride=32,
weight_decay=0.0)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def get_proposal_feature_extractor_model(self, name):
class ProposalFeatureExtractor(tf.keras.Model):
"""Dummy proposal feature extraction."""
def __init__(self, name):
super(ProposalFeatureExtractor, self).__init__(name=name)
self.conv = None
def build(self, input_shape):
self.conv = tf.keras.layers.Conv2D(
3, kernel_size=1, padding='SAME', name='layer1')
def call(self, inputs):
return self.conv(inputs)
return ProposalFeatureExtractor(name=name)
def get_box_classifier_feature_extractor_model(self, name):
return tf.keras.Sequential([tf.keras.layers.Conv2D(
3, kernel_size=1, padding='SAME', name=name + '_layer2')])
class FakeFasterRCNNKerasMultilevelFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Fake feature extractor to use in tests."""
def __init__(self):
super(FakeFasterRCNNKerasMultilevelFeatureExtractor, self).__init__(
is_training=False,
first_stage_features_stride=32,
weight_decay=0.0)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def get_proposal_feature_extractor_model(self, name):
class ProposalFeatureExtractor(tf.keras.Model):
"""Dummy proposal feature extraction."""
def __init__(self, name):
super(ProposalFeatureExtractor, self).__init__(name=name)
self.conv = None
def build(self, input_shape):
self.conv = tf.keras.layers.Conv2D(
3, kernel_size=3, name='layer1')
self.conv_1 = tf.keras.layers.Conv2D(
3, kernel_size=3, name='layer1')
def call(self, inputs):
output_1 = self.conv(inputs)
output_2 = self.conv_1(output_1)
return [output_1, output_2]
return ProposalFeatureExtractor(name=name)
class FasterRCNNMetaArchTestBase(test_case.TestCase, parameterized.TestCase):
"""Base class to test Faster R-CNN and R-FCN meta architectures."""
def _build_arg_scope_with_hyperparams(self,
hyperparams_text_proto,
is_training):
hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge(hyperparams_text_proto, hyperparams)
return hyperparams_builder.build(hyperparams, is_training=is_training)
def _build_keras_layer_hyperparams(self, hyperparams_text_proto):
hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge(hyperparams_text_proto, hyperparams)
return hyperparams_builder.KerasLayerHyperparams(hyperparams)
def _get_second_stage_box_predictor_text_proto(
self, share_box_across_classes=False):
share_box_field = 'true' if share_box_across_classes else 'false'
box_predictor_text_proto = """
mask_rcnn_box_predictor {{
fc_hyperparams {{
op: FC
activation: NONE
regularizer {{
l2_regularizer {{
weight: 0.0005
}}
}}
initializer {{
variance_scaling_initializer {{
factor: 1.0
uniform: true
mode: FAN_AVG
}}
}}
}}
share_box_across_classes: {share_box_across_classes}
}}
""".format(share_box_across_classes=share_box_field)
return box_predictor_text_proto
def _add_mask_to_second_stage_box_predictor_text_proto(
self, masks_are_class_agnostic=False):
agnostic = 'true' if masks_are_class_agnostic else 'false'
box_predictor_text_proto = """
mask_rcnn_box_predictor {
predict_instance_masks: true
masks_are_class_agnostic: """ + agnostic + """
mask_height: 14
mask_width: 14
conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
}
"""
return box_predictor_text_proto
def _get_second_stage_box_predictor(self, num_classes, is_training,
predict_masks, masks_are_class_agnostic,
share_box_across_classes=False,
use_keras=False):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(self._get_second_stage_box_predictor_text_proto(
share_box_across_classes), box_predictor_proto)
if predict_masks:
text_format.Merge(
self._add_mask_to_second_stage_box_predictor_text_proto(
masks_are_class_agnostic),
box_predictor_proto)
if use_keras:
return box_predictor_builder.build_keras(
hyperparams_builder.KerasLayerHyperparams,
inplace_batchnorm_update=False,
freeze_batchnorm=False,
box_predictor_config=box_predictor_proto,
num_classes=num_classes,
num_predictions_per_location_list=None,
is_training=is_training)
else:
return box_predictor_builder.build(
hyperparams_builder.build,
box_predictor_proto,
num_classes=num_classes,
is_training=is_training)
def _get_model(self, box_predictor, keras_model=False, **common_kwargs):
return faster_rcnn_meta_arch.FasterRCNNMetaArch(
initial_crop_size=3,
maxpool_kernel_size=1,
maxpool_stride=1,
second_stage_mask_rcnn_box_predictor=box_predictor,
**common_kwargs)
def _build_model(self,
is_training,
number_of_stages,
second_stage_batch_size,
first_stage_max_proposals=8,
num_classes=2,
hard_mining=False,
softmax_second_stage_classification_loss=True,
predict_masks=False,
pad_to_max_dimension=None,
masks_are_class_agnostic=False,
use_matmul_crop_and_resize=False,
clip_anchors_to_image=False,
use_matmul_gather_in_matcher=False,
use_static_shapes=False,
calibration_mapping_value=None,
share_box_across_classes=False,
return_raw_detections_during_predict=False,
output_final_box_features=False,
multi_level=False):
use_keras = tf_version.is_tf2()
def image_resizer_fn(image, masks=None):
"""Fake image resizer function."""
resized_inputs = []
resized_image = tf.identity(image)
if pad_to_max_dimension is not None:
resized_image = tf.image.pad_to_bounding_box(image, 0, 0,
pad_to_max_dimension,
pad_to_max_dimension)
resized_inputs.append(resized_image)
if masks is not None:
resized_masks = tf.identity(masks)
if pad_to_max_dimension is not None:
resized_masks = tf.image.pad_to_bounding_box(tf.transpose(masks,
[1, 2, 0]),
0, 0,
pad_to_max_dimension,
pad_to_max_dimension)
resized_masks = tf.transpose(resized_masks, [2, 0, 1])
resized_inputs.append(resized_masks)
resized_inputs.append(tf.shape(image))
return resized_inputs
# anchors in this test are designed so that a subset of anchors are inside
# the image and a subset of anchors are outside.
first_stage_anchor_generator = None
if multi_level:
min_level = 0
max_level = 1
anchor_scale = 0.1
aspect_ratios = [1.0, 2.0, 0.5]
scales_per_octave = 2
normalize_coordinates = False
(first_stage_anchor_generator
) = multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates)
else:
first_stage_anchor_scales = (0.001, 0.005, 0.1)
first_stage_anchor_aspect_ratios = (0.5, 1.0, 2.0)
first_stage_anchor_strides = (1, 1)
first_stage_anchor_generator = grid_anchor_generator.GridAnchorGenerator(
first_stage_anchor_scales,
first_stage_anchor_aspect_ratios,
anchor_stride=first_stage_anchor_strides)
first_stage_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN',
'proposal',
use_matmul_gather=use_matmul_gather_in_matcher)
if use_keras:
if multi_level:
fake_feature_extractor = FakeFasterRCNNKerasMultilevelFeatureExtractor()
else:
fake_feature_extractor = FakeFasterRCNNKerasFeatureExtractor()
else:
if multi_level:
fake_feature_extractor = FakeFasterRCNNMultiLevelFeatureExtractor()
else:
fake_feature_extractor = FakeFasterRCNNFeatureExtractor()
first_stage_box_predictor_hyperparams_text_proto = """
op: CONV
activation: RELU
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
}
}
"""
if use_keras:
first_stage_box_predictor_arg_scope_fn = (
self._build_keras_layer_hyperparams(
first_stage_box_predictor_hyperparams_text_proto))
else:
first_stage_box_predictor_arg_scope_fn = (
self._build_arg_scope_with_hyperparams(
first_stage_box_predictor_hyperparams_text_proto, is_training))
first_stage_box_predictor_kernel_size = 3
first_stage_atrous_rate = 1
first_stage_box_predictor_depth = 512
first_stage_minibatch_size = 3
first_stage_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=0.5, is_static=use_static_shapes)
first_stage_nms_score_threshold = -1.0
first_stage_nms_iou_threshold = 1.0
first_stage_max_proposals = first_stage_max_proposals
first_stage_non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=first_stage_nms_score_threshold,
iou_thresh=first_stage_nms_iou_threshold,
max_size_per_class=first_stage_max_proposals,
max_total_size=first_stage_max_proposals,
use_static_shapes=use_static_shapes)
first_stage_localization_loss_weight = 1.0
first_stage_objectness_loss_weight = 1.0
post_processing_config = post_processing_pb2.PostProcessing()
post_processing_text_proto = """
score_converter: IDENTITY
batch_non_max_suppression {
score_threshold: -20.0
iou_threshold: 1.0
max_detections_per_class: 5
max_total_detections: 5
        use_static_shapes: """ + '{}'.format(use_static_shapes) + """
}
"""
if calibration_mapping_value:
calibration_text_proto = """
calibration_config {
function_approximation {
x_y_pairs {
x_y_pair {
x: 0.0
y: %f
}
x_y_pair {
x: 1.0
y: %f
}}}}""" % (calibration_mapping_value, calibration_mapping_value)
post_processing_text_proto = (post_processing_text_proto
+ ' ' + calibration_text_proto)
text_format.Merge(post_processing_text_proto, post_processing_config)
second_stage_non_max_suppression_fn, second_stage_score_conversion_fn = (
post_processing_builder.build(post_processing_config))
second_stage_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN', 'detection',
use_matmul_gather=use_matmul_gather_in_matcher)
second_stage_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=1.0, is_static=use_static_shapes)
second_stage_localization_loss_weight = 1.0
second_stage_classification_loss_weight = 1.0
if softmax_second_stage_classification_loss:
second_stage_classification_loss = (
losses.WeightedSoftmaxClassificationLoss())
else:
second_stage_classification_loss = (
losses.WeightedSigmoidClassificationLoss())
hard_example_miner = None
if hard_mining:
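      # With num_hard_examples=1 and loss_type='both', mining keeps only the
      # single highest-loss (localization + classification) match per image.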
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=1,
iou_threshold=0.99,
loss_type='both',
cls_loss_weight=second_stage_classification_loss_weight,
loc_loss_weight=second_stage_localization_loss_weight,
max_negatives_per_positive=None)
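    # use_matmul_crop_and_resize swaps in a matmul-based crop-and-resize,
    # which works with fully static shapes (e.g. on TPU); the alternative is
    # built on tf.image.crop_and_resize.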
crop_and_resize_fn = (
spatial_ops.multilevel_matmul_crop_and_resize
if use_matmul_crop_and_resize
else spatial_ops.multilevel_native_crop_and_resize)
common_kwargs = {
'is_training':
is_training,
'num_classes':
num_classes,
'image_resizer_fn':
image_resizer_fn,
'feature_extractor':
fake_feature_extractor,
'number_of_stages':
number_of_stages,
'first_stage_anchor_generator':
first_stage_anchor_generator,
'first_stage_target_assigner':
first_stage_target_assigner,
'first_stage_atrous_rate':
first_stage_atrous_rate,
'first_stage_box_predictor_arg_scope_fn':
first_stage_box_predictor_arg_scope_fn,
'first_stage_box_predictor_kernel_size':
first_stage_box_predictor_kernel_size,
'first_stage_box_predictor_depth':
first_stage_box_predictor_depth,
'first_stage_minibatch_size':
first_stage_minibatch_size,
'first_stage_sampler':
first_stage_sampler,
'first_stage_non_max_suppression_fn':
first_stage_non_max_suppression_fn,
'first_stage_max_proposals':
first_stage_max_proposals,
'first_stage_localization_loss_weight':
first_stage_localization_loss_weight,
'first_stage_objectness_loss_weight':
first_stage_objectness_loss_weight,
'second_stage_target_assigner':
second_stage_target_assigner,
'second_stage_batch_size':
second_stage_batch_size,
'second_stage_sampler':
second_stage_sampler,
'second_stage_non_max_suppression_fn':
second_stage_non_max_suppression_fn,
'second_stage_score_conversion_fn':
second_stage_score_conversion_fn,
'second_stage_localization_loss_weight':
second_stage_localization_loss_weight,
'second_stage_classification_loss_weight':
second_stage_classification_loss_weight,
'second_stage_classification_loss':
second_stage_classification_loss,
'hard_example_miner':
hard_example_miner,
'crop_and_resize_fn':
crop_and_resize_fn,
'clip_anchors_to_image':
clip_anchors_to_image,
'use_static_shapes':
use_static_shapes,
'resize_masks':
True,
'return_raw_detections_during_predict':
return_raw_detections_during_predict,
'output_final_box_features':
output_final_box_features
}
return self._get_model(
self._get_second_stage_box_predictor(
num_classes=num_classes,
is_training=is_training,
use_keras=use_keras,
predict_masks=predict_masks,
masks_are_class_agnostic=masks_are_class_agnostic,
share_box_across_classes=share_box_across_classes), **common_kwargs)
@parameterized.parameters(
{'use_static_shapes': False},
{'use_static_shapes': True},
)
def test_predict_gives_correct_shapes_in_inference_mode_first_stage_only(
self, use_static_shapes=False):
batch_size = 2
height = 10
width = 12
input_image_shape = (batch_size, height, width, 3)
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=False,
number_of_stages=1,
second_stage_batch_size=2,
clip_anchors_to_image=use_static_shapes,
use_static_shapes=use_static_shapes)
def graph_fn(images):
"""Function to construct tf graph for the test."""
preprocessed_inputs, true_image_shapes = model.preprocess(images)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
return (prediction_dict['rpn_box_predictor_features'][0],
prediction_dict['rpn_features_to_crop'][0],
prediction_dict['image_shape'],
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'],
prediction_dict['anchors'])
images = np.zeros(input_image_shape, dtype=np.float32)
# In inference mode, anchors are clipped to the image window, but not
    # pruned. Since the fake feature extractor returns proposal features with
    # the same spatial shape as its input, the expected number of anchors
# is height * width * the number of anchors per location (i.e. 3x3).
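    # (The 3x3 comes from the 3 anchor scales x 3 aspect ratios configured for
    # the grid anchor generator in _build_model, with an anchor stride of 1.)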
expected_num_anchors = height * width * 3 * 3
expected_output_shapes = {
'rpn_box_predictor_features': (batch_size, height, width, 512),
'rpn_features_to_crop': (batch_size, height, width, 3),
'rpn_box_encodings': (batch_size, expected_num_anchors, 4),
'rpn_objectness_predictions_with_background':
(batch_size, expected_num_anchors, 2),
'anchors': (expected_num_anchors, 4)
}
if use_static_shapes:
results = self.execute(graph_fn, [images], graph=g)
else:
results = self.execute_cpu(graph_fn, [images], graph=g)
self.assertAllEqual(results[0].shape,
expected_output_shapes['rpn_box_predictor_features'])
self.assertAllEqual(results[1].shape,
expected_output_shapes['rpn_features_to_crop'])
self.assertAllEqual(results[2],
input_image_shape)
self.assertAllEqual(results[3].shape,
expected_output_shapes['rpn_box_encodings'])
self.assertAllEqual(
results[4].shape,
expected_output_shapes['rpn_objectness_predictions_with_background'])
self.assertAllEqual(results[5].shape,
expected_output_shapes['anchors'])
# Check that anchors are clipped to window.
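    # Boxes are in [ymin, xmin, ymax, xmax] order, so columns 0 and 2 are
    # compared against the image height and columns 1 and 3 against the width.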
anchors = results[5]
self.assertTrue(np.all(np.greater_equal(anchors, 0)))
self.assertTrue(np.all(np.less_equal(anchors[:, 0], height)))
self.assertTrue(np.all(np.less_equal(anchors[:, 1], width)))
self.assertTrue(np.all(np.less_equal(anchors[:, 2], height)))
self.assertTrue(np.all(np.less_equal(anchors[:, 3], width)))
@parameterized.parameters(
{'use_static_shapes': False},
{'use_static_shapes': True},
)
def test_predict_shape_in_inference_mode_first_stage_only_multi_level(
self, use_static_shapes):
batch_size = 2
height = 50
width = 52
input_image_shape = (batch_size, height, width, 3)
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=False,
number_of_stages=1,
second_stage_batch_size=2,
clip_anchors_to_image=use_static_shapes,
use_static_shapes=use_static_shapes,
multi_level=True)
def graph_fn(images):
"""Function to construct tf graph for the test."""
preprocessed_inputs, true_image_shapes = model.preprocess(images)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
return (prediction_dict['rpn_box_predictor_features'][0],
prediction_dict['rpn_box_predictor_features'][1],
prediction_dict['rpn_features_to_crop'][0],
prediction_dict['rpn_features_to_crop'][1],
prediction_dict['image_shape'],
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'],
prediction_dict['anchors'])
images = np.zeros(input_image_shape, dtype=np.float32)
    # In inference mode, anchors are clipped to the image window, but not
    # pruned. The fake multi-level feature extractor applies two 3x3 VALID
    # convolutions, giving feature maps of spatial size (height-2, width-2) and
    # (height-4, width-4), and the multiscale anchor generator places
    # 3 aspect ratios x 2 scales per octave = 6 anchors per location.
expected_num_anchors = ((height-2) * (width-2) + (height-4) * (width-4)) * 6
expected_output_shapes = {
'rpn_box_predictor_features_0': (batch_size, height-2, width-2, 512),
'rpn_box_predictor_features_1': (batch_size, height-4, width-4, 512),
'rpn_features_to_crop_0': (batch_size, height-2, width-2, 3),
'rpn_features_to_crop_1': (batch_size, height-4, width-4, 3),
'rpn_box_encodings': (batch_size, expected_num_anchors, 4),
'rpn_objectness_predictions_with_background':
(batch_size, expected_num_anchors, 2),
}
if use_static_shapes:
expected_output_shapes['anchors'] = (expected_num_anchors, 4)
else:
expected_output_shapes['anchors'] = (18300, 4)
if use_static_shapes:
results = self.execute(graph_fn, [images], graph=g)
else:
results = self.execute_cpu(graph_fn, [images], graph=g)
self.assertAllEqual(results[0].shape,
expected_output_shapes['rpn_box_predictor_features_0'])
self.assertAllEqual(results[1].shape,
expected_output_shapes['rpn_box_predictor_features_1'])
self.assertAllEqual(results[2].shape,
expected_output_shapes['rpn_features_to_crop_0'])
self.assertAllEqual(results[3].shape,
expected_output_shapes['rpn_features_to_crop_1'])
self.assertAllEqual(results[4],
input_image_shape)
self.assertAllEqual(results[5].shape,
expected_output_shapes['rpn_box_encodings'])
self.assertAllEqual(
results[6].shape,
expected_output_shapes['rpn_objectness_predictions_with_background'])
self.assertAllEqual(results[7].shape,
expected_output_shapes['anchors'])
# Check that anchors are clipped to window.
    anchors = results[7]
self.assertTrue(np.all(np.greater_equal(anchors, 0)))
self.assertTrue(np.all(np.less_equal(anchors[:, 0], height)))
self.assertTrue(np.all(np.less_equal(anchors[:, 1], width)))
self.assertTrue(np.all(np.less_equal(anchors[:, 2], height)))
self.assertTrue(np.all(np.less_equal(anchors[:, 3], width)))
def test_regularization_losses(self):
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=True, number_of_stages=1, second_stage_batch_size=2)
def graph_fn():
batch_size = 2
height = 10
width = 12
input_image_shape = (batch_size, height, width, 3)
image, true_image_shapes = model.preprocess(tf.zeros(input_image_shape))
model.predict(image, true_image_shapes)
reg_losses = tf.math.add_n(model.regularization_losses())
return reg_losses
reg_losses = self.execute(graph_fn, [], graph=g)
self.assertGreaterEqual(reg_losses, 0)
def test_predict_gives_valid_anchors_in_training_mode_first_stage_only(self):
expected_output_keys = set([
'rpn_box_predictor_features', 'rpn_features_to_crop', 'image_shape',
'rpn_box_encodings', 'rpn_objectness_predictions_with_background',
'anchors', 'feature_maps'])
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=True, number_of_stages=1, second_stage_batch_size=2,)
batch_size = 2
height = 10
width = 12
input_image_shape = (batch_size, height, width, 3)
def graph_fn():
image, true_image_shapes = model.preprocess(tf.zeros(input_image_shape))
prediction_dict = model.predict(image, true_image_shapes)
self.assertEqual(set(prediction_dict.keys()), expected_output_keys)
return (prediction_dict['image_shape'], prediction_dict['anchors'],
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'])
(image_shape, anchors, rpn_box_encodings,
rpn_objectness_predictions_with_background) = self.execute(graph_fn, [],
graph=g)
# At training time, anchors that exceed image bounds are pruned. Thus
# the `expected_num_anchors` in the above inference mode test is now
# a strict upper bound on the number of anchors.
num_anchors_strict_upper_bound = height * width * 3 * 3
self.assertAllEqual(image_shape, input_image_shape)
self.assertTrue(len(anchors.shape) == 2 and anchors.shape[1] == 4)
num_anchors_out = anchors.shape[0]
self.assertLess(num_anchors_out, num_anchors_strict_upper_bound)
self.assertTrue(np.all(np.greater_equal(anchors, 0)))
self.assertTrue(np.all(np.less_equal(anchors[:, 0], height)))
self.assertTrue(np.all(np.less_equal(anchors[:, 1], width)))
self.assertTrue(np.all(np.less_equal(anchors[:, 2], height)))
self.assertTrue(np.all(np.less_equal(anchors[:, 3], width)))
self.assertAllEqual(rpn_box_encodings.shape,
(batch_size, num_anchors_out, 4))
self.assertAllEqual(
rpn_objectness_predictions_with_background.shape,
(batch_size, num_anchors_out, 2))
@parameterized.parameters(
{'use_static_shapes': False},
{'use_static_shapes': True},
)
def test_predict_correct_shapes_in_inference_mode_two_stages(
self, use_static_shapes):
def compare_results(results, expected_output_shapes):
"""Checks if the shape of the predictions are as expected."""
self.assertAllEqual(results[0][0].shape,
expected_output_shapes['rpn_box_predictor_features'])
self.assertAllEqual(results[1][0].shape,
expected_output_shapes['rpn_features_to_crop'])
self.assertAllEqual(results[2].shape,
expected_output_shapes['image_shape'])
self.assertAllEqual(results[3].shape,
expected_output_shapes['rpn_box_encodings'])
self.assertAllEqual(
results[4].shape,
expected_output_shapes['rpn_objectness_predictions_with_background'])
self.assertAllEqual(results[5].shape,
expected_output_shapes['anchors'])
self.assertAllEqual(results[6].shape,
expected_output_shapes['refined_box_encodings'])
self.assertAllEqual(
results[7].shape,
expected_output_shapes['class_predictions_with_background'])
self.assertAllEqual(results[8].shape,
expected_output_shapes['num_proposals'])
self.assertAllEqual(results[9].shape,
expected_output_shapes['proposal_boxes'])
self.assertAllEqual(results[10].shape,
expected_output_shapes['proposal_boxes_normalized'])
self.assertAllEqual(results[11].shape,
expected_output_shapes['box_classifier_features'])
self.assertAllEqual(results[12].shape,
expected_output_shapes['final_anchors'])
batch_size = 2
image_size = 10
max_num_proposals = 8
initial_crop_size = 3
maxpool_stride = 1
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=False,
number_of_stages=2,
second_stage_batch_size=2,
predict_masks=False,
use_matmul_crop_and_resize=use_static_shapes,
clip_anchors_to_image=use_static_shapes,
use_static_shapes=use_static_shapes)
def graph_fn():
"""A function with TF compute."""
if use_static_shapes:
images = tf.random_uniform((batch_size, image_size, image_size, 3))
else:
images = tf.random_uniform((tf.random_uniform([],
minval=batch_size,
maxval=batch_size + 1,
dtype=tf.int32),
tf.random_uniform([],
minval=image_size,
maxval=image_size + 1,
dtype=tf.int32),
tf.random_uniform([],
minval=image_size,
maxval=image_size + 1,
dtype=tf.int32), 3))
preprocessed_inputs, true_image_shapes = model.preprocess(images)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
return (prediction_dict['rpn_box_predictor_features'],
prediction_dict['rpn_features_to_crop'],
prediction_dict['image_shape'],
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'],
prediction_dict['anchors'],
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['num_proposals'],
prediction_dict['proposal_boxes'],
prediction_dict['proposal_boxes_normalized'],
prediction_dict['box_classifier_features'],
prediction_dict['final_anchors'])
expected_num_anchors = image_size * image_size * 3 * 3
expected_shapes = {
'rpn_box_predictor_features':
(2, image_size, image_size, 512),
'rpn_features_to_crop': (2, image_size, image_size, 3),
'image_shape': (4,),
'rpn_box_encodings': (2, expected_num_anchors, 4),
'rpn_objectness_predictions_with_background':
(2, expected_num_anchors, 2),
'anchors': (expected_num_anchors, 4),
'refined_box_encodings': (2 * max_num_proposals, 2, 4),
'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),
'num_proposals': (2,),
'proposal_boxes': (2, max_num_proposals, 4),
'proposal_boxes_normalized': (2, max_num_proposals, 4),
'box_classifier_features':
self._get_box_classifier_features_shape(image_size,
batch_size,
max_num_proposals,
initial_crop_size,
maxpool_stride,
3),
'feature_maps': [(2, image_size, image_size, 512)],
'final_anchors': (2, max_num_proposals, 4)
}
if use_static_shapes:
results = self.execute(graph_fn, [], graph=g)
else:
results = self.execute_cpu(graph_fn, [], graph=g)
compare_results(results, expected_shapes)
@parameterized.parameters(
{'use_static_shapes': False},
{'use_static_shapes': True},
)
def test_predict_gives_correct_shapes_in_train_mode_both_stages(
self,
use_static_shapes=False):
batch_size = 2
image_size = 10
max_num_proposals = 7
initial_crop_size = 3
maxpool_stride = 1
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=True,
number_of_stages=2,
second_stage_batch_size=7,
predict_masks=False,
use_matmul_crop_and_resize=use_static_shapes,
clip_anchors_to_image=use_static_shapes,
use_static_shapes=use_static_shapes)
def graph_fn(images, gt_boxes, gt_classes, gt_weights):
"""Function to construct tf graph for the test."""
preprocessed_inputs, true_image_shapes = model.preprocess(images)
model.provide_groundtruth(
groundtruth_boxes_list=tf.unstack(gt_boxes),
groundtruth_classes_list=tf.unstack(gt_classes),
groundtruth_weights_list=tf.unstack(gt_weights))
result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes)
return (result_tensor_dict['refined_box_encodings'],
result_tensor_dict['class_predictions_with_background'],
result_tensor_dict['proposal_boxes'],
result_tensor_dict['proposal_boxes_normalized'],
result_tensor_dict['anchors'],
result_tensor_dict['rpn_box_encodings'],
result_tensor_dict['rpn_objectness_predictions_with_background'],
result_tensor_dict['rpn_features_to_crop'][0],
result_tensor_dict['rpn_box_predictor_features'][0],
result_tensor_dict['final_anchors'],
)
image_shape = (batch_size, image_size, image_size, 3)
images = np.zeros(image_shape, dtype=np.float32)
gt_boxes = np.stack([
np.array([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=np.float32),
np.array([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=np.float32)
])
gt_classes = np.stack([
np.array([[1, 0], [0, 1]], dtype=np.float32),
np.array([[1, 0], [1, 0]], dtype=np.float32)
])
gt_weights = np.stack([
np.array([1, 1], dtype=np.float32),
np.array([1, 1], dtype=np.float32)
])
if use_static_shapes:
results = self.execute(graph_fn,
[images, gt_boxes, gt_classes, gt_weights],
graph=g)
else:
results = self.execute_cpu(graph_fn,
[images, gt_boxes, gt_classes, gt_weights],
graph=g)
expected_shapes = {
'rpn_box_predictor_features': (2, image_size, image_size, 512),
'rpn_features_to_crop': (2, image_size, image_size, 3),
'refined_box_encodings': (2 * max_num_proposals, 2, 4),
'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),
'proposal_boxes': (2, max_num_proposals, 4),
'rpn_box_encodings': (2, image_size * image_size * 9, 4),
'proposal_boxes_normalized': (2, max_num_proposals, 4),
'box_classifier_features':
self._get_box_classifier_features_shape(
image_size, batch_size, max_num_proposals, initial_crop_size,
maxpool_stride, 3),
'rpn_objectness_predictions_with_background':
(2, image_size * image_size * 9, 2),
'final_anchors': (2, max_num_proposals, 4)
}
# TODO(rathodv): Possibly change utils/test_case.py to accept dictionaries
    # and return dictionaries so we don't have to rely on the order of tensors.
self.assertAllEqual(results[0].shape,
expected_shapes['refined_box_encodings'])
self.assertAllEqual(results[1].shape,
expected_shapes['class_predictions_with_background'])
self.assertAllEqual(results[2].shape, expected_shapes['proposal_boxes'])
self.assertAllEqual(results[3].shape,
expected_shapes['proposal_boxes_normalized'])
anchors_shape = results[4].shape
self.assertAllEqual(results[5].shape,
[batch_size, anchors_shape[0], 4])
self.assertAllEqual(results[6].shape,
[batch_size, anchors_shape[0], 2])
self.assertAllEqual(results[7].shape,
expected_shapes['rpn_features_to_crop'])
self.assertAllEqual(results[8].shape,
expected_shapes['rpn_box_predictor_features'])
self.assertAllEqual(results[9].shape,
expected_shapes['final_anchors'])
@parameterized.parameters(
{'use_static_shapes': False, 'pad_to_max_dimension': None},
{'use_static_shapes': True, 'pad_to_max_dimension': None},
{'use_static_shapes': False, 'pad_to_max_dimension': 56,},
{'use_static_shapes': True, 'pad_to_max_dimension': 56},
)
def test_postprocess_first_stage_only_inference_mode(
self, use_static_shapes=False,
pad_to_max_dimension=None):
batch_size = 2
first_stage_max_proposals = 4 if use_static_shapes else 8
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=False,
number_of_stages=1, second_stage_batch_size=6,
use_matmul_crop_and_resize=use_static_shapes,
clip_anchors_to_image=use_static_shapes,
use_static_shapes=use_static_shapes,
use_matmul_gather_in_matcher=use_static_shapes,
first_stage_max_proposals=first_stage_max_proposals,
pad_to_max_dimension=pad_to_max_dimension)
def graph_fn(images,
rpn_box_encodings,
rpn_objectness_predictions_with_background,
rpn_features_to_crop,
anchors):
"""Function to construct tf graph for the test."""
preprocessed_images, true_image_shapes = model.preprocess(images)
proposals = model.postprocess({
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'rpn_features_to_crop': rpn_features_to_crop,
'image_shape': tf.shape(preprocessed_images),
'anchors': anchors}, true_image_shapes)
return (proposals['num_detections'], proposals['detection_boxes'],
proposals['detection_scores'], proposals['raw_detection_boxes'],
proposals['raw_detection_scores'])
anchors = np.array(
[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[16, 16, 32, 32]], dtype=np.float32)
rpn_box_encodings = np.zeros(
(batch_size, anchors.shape[0], BOX_CODE_SIZE), dtype=np.float32)
# use different numbers for the objectness category to break ties in
# order of boxes returned by NMS
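    # (column 0 is the background logit and column 1 the objectness logit, so
    # anchors with logits like [-10, 13] rank highest among the proposals)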
rpn_objectness_predictions_with_background = np.array([
[[-10, 13],
[10, -10],
[10, -11],
[-10, 12]],
[[10, -10],
[-10, 13],
[-10, 12],
[10, -11]]], dtype=np.float32)
rpn_features_to_crop = np.ones((batch_size, 8, 8, 10), dtype=np.float32)
image_shape = (batch_size, 32, 32, 3)
images = np.zeros(image_shape, dtype=np.float32)
if use_static_shapes:
results = self.execute(graph_fn,
[images, rpn_box_encodings,
rpn_objectness_predictions_with_background,
rpn_features_to_crop, anchors], graph=g)
else:
results = self.execute_cpu(graph_fn,
[images, rpn_box_encodings,
rpn_objectness_predictions_with_background,
rpn_features_to_crop, anchors], graph=g)
expected_proposal_boxes = [
[[0, 0, .5, .5], [.5, .5, 1, 1], [0, .5, .5, 1], [.5, 0, 1.0, .5]]
+ 4 * [4 * [0]],
[[0, .5, .5, 1], [.5, 0, 1.0, .5], [0, 0, .5, .5], [.5, .5, 1, 1]]
+ 4 * [4 * [0]]]
expected_proposal_scores = [[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0]]
expected_num_proposals = [4, 4]
expected_raw_proposal_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]],
[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]]]
expected_raw_scores = [[[0., 1.], [1., 0.], [1., 0.], [0., 1.]],
[[1., 0.], [0., 1.], [0., 1.], [1., 0.]]]
if pad_to_max_dimension is not None:
expected_raw_proposal_boxes = (np.array(expected_raw_proposal_boxes) *
32 / pad_to_max_dimension)
expected_proposal_boxes = (np.array(expected_proposal_boxes) *
32 / pad_to_max_dimension)
self.assertAllClose(results[0], expected_num_proposals)
for indx, num_proposals in enumerate(expected_num_proposals):
self.assertAllClose(results[1][indx][0:num_proposals],
expected_proposal_boxes[indx][0:num_proposals])
self.assertAllClose(results[2][indx][0:num_proposals],
expected_proposal_scores[indx][0:num_proposals])
self.assertAllClose(results[3], expected_raw_proposal_boxes)
self.assertAllClose(results[4], expected_raw_scores)
def _test_postprocess_first_stage_only_train_mode(self,
pad_to_max_dimension=None):
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=True,
number_of_stages=1, second_stage_batch_size=2,
pad_to_max_dimension=pad_to_max_dimension)
batch_size = 2
def graph_fn():
"""A function with TF compute."""
anchors = tf.constant(
[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[16, 16, 32, 32]], dtype=tf.float32)
rpn_box_encodings = tf.zeros(
[batch_size, anchors.get_shape().as_list()[0],
BOX_CODE_SIZE], dtype=tf.float32)
# use different numbers for the objectness category to break ties in
# order of boxes returned by NMS
rpn_objectness_predictions_with_background = tf.constant([
[[-10, 13],
[-10, 12],
[-10, 11],
[-10, 10]],
[[-10, 13],
[-10, 12],
[-10, 11],
[-10, 10]]], dtype=tf.float32)
rpn_features_to_crop = tf.ones((batch_size, 8, 8, 10), dtype=tf.float32)
image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32)
groundtruth_boxes_list = [
tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32),
tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)]
groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]],
dtype=tf.float32),
tf.constant([[1, 0], [1, 0]],
dtype=tf.float32)]
groundtruth_weights_list = [
tf.constant([1, 1], dtype=tf.float32),
tf.constant([1, 1], dtype=tf.float32)
]
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(
groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_weights_list=groundtruth_weights_list)
proposals = model.postprocess({
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'rpn_features_to_crop': rpn_features_to_crop,
'anchors': anchors,
'image_shape': image_shape}, true_image_shapes)
return (proposals['detection_boxes'], proposals['detection_scores'],
proposals['num_detections'],
proposals['detection_multiclass_scores'],
proposals['raw_detection_boxes'],
proposals['raw_detection_scores'])
expected_proposal_boxes = [
[[0, 0, .5, .5], [.5, .5, 1, 1]], [[0, .5, .5, 1], [.5, 0, 1, .5]]]
expected_proposal_scores = [[1, 1],
[1, 1]]
expected_proposal_multiclass_scores = [[[0., 1.], [0., 1.]],
[[0., 1.], [0., 1.]]]
expected_raw_proposal_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]],
[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]]]
expected_raw_scores = [[[0., 1.], [0., 1.], [0., 1.], [0., 1.]],
[[0., 1.], [0., 1.], [0., 1.], [0., 1.]]]
(proposal_boxes, proposal_scores, batch_num_detections,
batch_multiclass_scores, raw_detection_boxes,
raw_detection_scores) = self.execute_cpu(graph_fn, [], graph=g)
for image_idx in range(batch_size):
num_detections = int(batch_num_detections[image_idx])
boxes = proposal_boxes[image_idx][:num_detections, :].tolist()
scores = proposal_scores[image_idx][:num_detections].tolist()
multiclass_scores = batch_multiclass_scores[
image_idx][:num_detections, :].tolist()
expected_boxes = expected_proposal_boxes[image_idx]
expected_scores = expected_proposal_scores[image_idx]
expected_multiclass_scores = expected_proposal_multiclass_scores[
image_idx]
self.assertTrue(
test_utils.first_rows_close_as_set(boxes, expected_boxes))
self.assertTrue(
test_utils.first_rows_close_as_set(scores, expected_scores))
self.assertTrue(
test_utils.first_rows_close_as_set(multiclass_scores,
expected_multiclass_scores))
self.assertAllClose(raw_detection_boxes, expected_raw_proposal_boxes)
self.assertAllClose(raw_detection_scores, expected_raw_scores)
@parameterized.parameters(
{'pad_to_max_dimension': 56},
{'pad_to_max_dimension': None}
)
def test_postprocess_first_stage_only_train_mode_padded_image(
self, pad_to_max_dimension):
self._test_postprocess_first_stage_only_train_mode(pad_to_max_dimension)
@parameterized.parameters(
{'use_static_shapes': False, 'pad_to_max_dimension': None},
{'use_static_shapes': True, 'pad_to_max_dimension': None},
{'use_static_shapes': False, 'pad_to_max_dimension': 56},
{'use_static_shapes': True, 'pad_to_max_dimension': 56},
)
def test_postprocess_second_stage_only_inference_mode(
self, use_static_shapes=False,
pad_to_max_dimension=None):
batch_size = 2
num_classes = 2
image_shape = np.array((2, 36, 48, 3), dtype=np.int32)
first_stage_max_proposals = 8
total_num_padded_proposals = batch_size * first_stage_max_proposals
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=False,
number_of_stages=2,
second_stage_batch_size=6,
use_matmul_crop_and_resize=use_static_shapes,
clip_anchors_to_image=use_static_shapes,
use_static_shapes=use_static_shapes,
use_matmul_gather_in_matcher=use_static_shapes,
pad_to_max_dimension=pad_to_max_dimension)
def graph_fn(images,
refined_box_encodings,
class_predictions_with_background,
num_proposals,
proposal_boxes):
"""Function to construct tf graph for the test."""
_, true_image_shapes = model.preprocess(images)
detections = model.postprocess({
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'num_proposals': num_proposals,
'proposal_boxes': proposal_boxes,
}, true_image_shapes)
return (detections['num_detections'], detections['detection_boxes'],
detections['detection_scores'], detections['detection_classes'],
detections['raw_detection_boxes'],
detections['raw_detection_scores'],
detections['detection_multiclass_scores'],
detections['detection_anchor_indices'])
proposal_boxes = np.array(
[[[1, 1, 2, 3],
[0, 0, 1, 1],
[.5, .5, .6, .6],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],
[[2, 3, 6, 8],
[1, 2, 5, 3],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=np.float32)
num_proposals = np.array([3, 2], dtype=np.int32)
refined_box_encodings = np.zeros(
[total_num_padded_proposals, num_classes, 4], dtype=np.float32)
class_predictions_with_background = np.ones(
[total_num_padded_proposals, num_classes+1], dtype=np.float32)
images = np.zeros(image_shape, dtype=np.float32)
if use_static_shapes:
results = self.execute(graph_fn,
[images, refined_box_encodings,
class_predictions_with_background,
num_proposals, proposal_boxes], graph=g)
else:
results = self.execute_cpu(graph_fn,
[images, refined_box_encodings,
class_predictions_with_background,
num_proposals, proposal_boxes], graph=g)
# Note that max_total_detections=5 in the NMS config.
expected_num_detections = [5, 4]
expected_detection_classes = [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]
expected_detection_scores = [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]
expected_multiclass_scores = [[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]]
# Note that a single anchor can be used for multiple detections (predictions
# are made independently per class).
expected_anchor_indices = [[0, 1, 2, 0, 1],
[0, 1, 0, 1]]
h = float(image_shape[1])
w = float(image_shape[2])
expected_raw_detection_boxes = np.array(
[[[1 / h, 1 / w, 2 / h, 3 / w], [0, 0, 1 / h, 1 / w],
[.5 / h, .5 / w, .6 / h, .6 / w], 4 * [0], 4 * [0], 4 * [0], 4 * [0],
4 * [0]],
[[2 / h, 3 / w, 6 / h, 8 / w], [1 / h, 2 / w, 5 / h, 3 / w], 4 * [0],
4 * [0], 4 * [0], 4 * [0], 4 * [0], 4 * [0]]],
dtype=np.float32)
self.assertAllClose(results[0], expected_num_detections)
for indx, num_proposals in enumerate(expected_num_detections):
self.assertAllClose(results[2][indx][0:num_proposals],
expected_detection_scores[indx][0:num_proposals])
self.assertAllClose(results[3][indx][0:num_proposals],
expected_detection_classes[indx][0:num_proposals])
self.assertAllClose(results[6][indx][0:num_proposals],
expected_multiclass_scores[indx][0:num_proposals])
self.assertAllClose(results[7][indx][0:num_proposals],
expected_anchor_indices[indx][0:num_proposals])
self.assertAllClose(results[4], expected_raw_detection_boxes)
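    # Raw detection scores are the class predictions reshaped to
    # (batch_size, first_stage_max_proposals, num_classes + 1) = (2, 8, 3).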
self.assertAllClose(results[5],
class_predictions_with_background.reshape([-1, 8, 3]))
if not use_static_shapes:
self.assertAllEqual(results[1].shape, [2, 5, 4])
def test_preprocess_preserves_dynamic_input_shapes(self):
width = tf.random.uniform([], minval=5, maxval=10, dtype=tf.int32)
batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32)
shape = tf.stack([batch, 5, width, 3])
image = tf.random.uniform(shape)
model = self._build_model(
is_training=False, number_of_stages=2, second_stage_batch_size=6)
preprocessed_inputs, _ = model.preprocess(image)
self.assertTrue(
preprocessed_inputs.shape.is_compatible_with([None, 5, None, 3]))
def test_preprocess_preserves_static_input_shapes(self):
shape = tf.stack([2, 5, 5, 3])
image = tf.random.uniform(shape)
model = self._build_model(
is_training=False, number_of_stages=2, second_stage_batch_size=6)
preprocessed_inputs, _ = model.preprocess(image)
self.assertTrue(
preprocessed_inputs.shape.is_compatible_with([2, 5, 5, 3]))
# TODO(rathodv): Split test into two - with and without masks.
def test_loss_first_stage_only_mode(self):
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=True,
number_of_stages=1, second_stage_batch_size=6)
batch_size = 2
def graph_fn():
"""A function with TF compute."""
anchors = tf.constant(
[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[16, 16, 32, 32]], dtype=tf.float32)
rpn_box_encodings = tf.zeros(
[batch_size,
anchors.get_shape().as_list()[0],
BOX_CODE_SIZE], dtype=tf.float32)
# use different numbers for the objectness category to break ties in
# order of boxes returned by NMS
rpn_objectness_predictions_with_background = tf.constant([
[[-10, 13],
[10, -10],
[10, -11],
[-10, 12]],
[[10, -10],
[-10, 13],
[-10, 12],
[10, -11]]], dtype=tf.float32)
image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32)
groundtruth_boxes_list = [
tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32),
tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)]
groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]],
dtype=tf.float32),
tf.constant([[1, 0], [1, 0]],
dtype=tf.float32)]
prediction_dict = {
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'image_shape': image_shape,
'anchors': anchors
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
loss_dict = model.loss(prediction_dict, true_image_shapes)
self.assertNotIn('Loss/BoxClassifierLoss/localization_loss',
loss_dict)
self.assertNotIn('Loss/BoxClassifierLoss/classification_loss',
loss_dict)
return (loss_dict['Loss/RPNLoss/localization_loss'],
loss_dict['Loss/RPNLoss/objectness_loss'])
loc_loss, obj_loss = self.execute_cpu(graph_fn, [], graph=g)
self.assertAllClose(loc_loss, 0)
self.assertAllClose(obj_loss, 0)
# TODO(rathodv): Split test into two - with and without masks.
def test_loss_full(self):
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=True,
number_of_stages=2, second_stage_batch_size=6)
batch_size = 3
def graph_fn():
"""A function with TF compute."""
anchors = tf.constant(
[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[16, 16, 32, 32]], dtype=tf.float32)
rpn_box_encodings = tf.zeros(
[batch_size,
anchors.get_shape().as_list()[0],
BOX_CODE_SIZE], dtype=tf.float32)
# use different numbers for the objectness category to break ties in
# order of boxes returned by NMS
rpn_objectness_predictions_with_background = tf.constant(
[[[-10, 13], [10, -10], [10, -11], [-10, 12]],
[[10, -10], [-10, 13], [-10, 12], [10, -11]],
[[10, -10], [-10, 13], [-10, 12], [10, -11]]],
dtype=tf.float32)
image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32)
num_proposals = tf.constant([6, 6, 6], dtype=tf.int32)
proposal_boxes = tf.constant(
3 * [[[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16],
[16, 16, 32, 32], [0, 0, 16, 16], [0, 16, 16, 32]]],
dtype=tf.float32)
refined_box_encodings = tf.zeros(
(batch_size * model.max_num_proposals,
model.num_classes,
BOX_CODE_SIZE), dtype=tf.float32)
class_predictions_with_background = tf.constant(
[
[-10, 10, -10], # first image
[10, -10, -10],
[10, -10, -10],
[-10, -10, 10],
[-10, 10, -10],
[10, -10, -10],
[10, -10, -10], # second image
[-10, 10, -10],
[-10, 10, -10],
[10, -10, -10],
[10, -10, -10],
[-10, 10, -10],
[10, -10, -10], # third image
[-10, 10, -10],
[-10, 10, -10],
[10, -10, -10],
[10, -10, -10],
[-10, 10, -10]
],
dtype=tf.float32)
mask_predictions_logits = 20 * tf.ones((batch_size *
model.max_num_proposals,
model.num_classes,
14, 14),
dtype=tf.float32)
groundtruth_boxes_list = [
tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32),
tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32),
tf.constant([[0, .5, .5, 1], [.5, 0, 1, 1]], dtype=tf.float32)
]
groundtruth_classes_list = [
tf.constant([[1, 0], [0, 1]], dtype=tf.float32),
tf.constant([[1, 0], [1, 0]], dtype=tf.float32),
tf.constant([[1, 0], [0, 1]], dtype=tf.float32)
]
# Set all elements of groundtruth mask to 1.0. In this case all proposal
# crops of the groundtruth masks should return a mask that covers the
# entire proposal. Thus, if mask_predictions_logits element values are all
# greater than 20, the loss should be zero.
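      # (Numerically, sigmoid(20) ~= 1 - 2e-9, so the per-pixel cross entropy
      # against an all-ones groundtruth mask is ~2e-9, effectively zero.)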
groundtruth_masks_list = [
tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32),
tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32),
tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32)
]
groundtruth_weights_list = [
tf.constant([1, 1], dtype=tf.float32),
tf.constant([1, 1], dtype=tf.float32),
tf.constant([1, 0], dtype=tf.float32)
]
prediction_dict = {
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'image_shape': image_shape,
'anchors': anchors,
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'proposal_boxes': proposal_boxes,
'num_proposals': num_proposals,
'mask_predictions': mask_predictions_logits
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(
groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list,
groundtruth_weights_list=groundtruth_weights_list)
loss_dict = model.loss(prediction_dict, true_image_shapes)
return (loss_dict['Loss/RPNLoss/localization_loss'],
loss_dict['Loss/RPNLoss/objectness_loss'],
loss_dict['Loss/BoxClassifierLoss/localization_loss'],
loss_dict['Loss/BoxClassifierLoss/classification_loss'],
loss_dict['Loss/BoxClassifierLoss/mask_loss'])
(rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss,
box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g)
self.assertAllClose(rpn_loc_loss, 0)
self.assertAllClose(rpn_obj_loss, 0)
self.assertAllClose(box_loc_loss, 0)
self.assertAllClose(box_cls_loss, 0)
self.assertAllClose(box_mask_loss, 0)
def test_loss_full_zero_padded_proposals(self):
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=True, number_of_stages=2, second_stage_batch_size=6)
batch_size = 1
def graph_fn():
"""A function with TF compute."""
anchors = tf.constant(
[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[16, 16, 32, 32]], dtype=tf.float32)
rpn_box_encodings = tf.zeros(
[batch_size,
anchors.get_shape().as_list()[0],
BOX_CODE_SIZE], dtype=tf.float32)
# use different numbers for the objectness category to break ties in
# order of boxes returned by NMS
rpn_objectness_predictions_with_background = tf.constant([
[[-10, 13],
[10, -10],
[10, -11],
[10, -12]],], dtype=tf.float32)
image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32)
# box_classifier_batch_size is 6, but here we assume that the number of
# actual proposals (not counting zero paddings) is fewer (3).
num_proposals = tf.constant([3], dtype=tf.int32)
proposal_boxes = tf.constant(
[[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[0, 0, 0, 0], # begin paddings
[0, 0, 0, 0],
[0, 0, 0, 0]]], dtype=tf.float32)
refined_box_encodings = tf.zeros(
(batch_size * model.max_num_proposals,
model.num_classes,
BOX_CODE_SIZE), dtype=tf.float32)
class_predictions_with_background = tf.constant(
[[-10, 10, -10],
[10, -10, -10],
[10, -10, -10],
[0, 0, 0], # begin paddings
[0, 0, 0],
[0, 0, 0]], dtype=tf.float32)
mask_predictions_logits = 20 * tf.ones((batch_size *
model.max_num_proposals,
model.num_classes,
14, 14),
dtype=tf.float32)
groundtruth_boxes_list = [
tf.constant([[0, 0, .5, .5]], dtype=tf.float32)]
groundtruth_classes_list = [tf.constant([[1, 0]], dtype=tf.float32)]
# Set all elements of groundtruth mask to 1.0. In this case all proposal
# crops of the groundtruth masks should return a mask that covers the
# entire proposal. Thus, if mask_predictions_logits element values are all
# greater than 20, the loss should be zero.
groundtruth_masks_list = [tf.convert_to_tensor(np.ones((1, 32, 32)),
dtype=tf.float32)]
prediction_dict = {
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'image_shape': image_shape,
'anchors': anchors,
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'proposal_boxes': proposal_boxes,
'num_proposals': num_proposals,
'mask_predictions': mask_predictions_logits
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list)
loss_dict = model.loss(prediction_dict, true_image_shapes)
return (loss_dict['Loss/RPNLoss/localization_loss'],
loss_dict['Loss/RPNLoss/objectness_loss'],
loss_dict['Loss/BoxClassifierLoss/localization_loss'],
loss_dict['Loss/BoxClassifierLoss/classification_loss'],
loss_dict['Loss/BoxClassifierLoss/mask_loss'])
(rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss,
box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g)
self.assertAllClose(rpn_loc_loss, 0)
self.assertAllClose(rpn_obj_loss, 0)
self.assertAllClose(box_loc_loss, 0)
self.assertAllClose(box_cls_loss, 0)
self.assertAllClose(box_mask_loss, 0)
def test_loss_full_multiple_label_groundtruth(self):
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=True,
number_of_stages=2, second_stage_batch_size=6,
softmax_second_stage_classification_loss=False)
batch_size = 1
def graph_fn():
"""A function with TF compute."""
anchors = tf.constant(
[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[16, 16, 32, 32]], dtype=tf.float32)
rpn_box_encodings = tf.zeros(
[batch_size,
anchors.get_shape().as_list()[0],
BOX_CODE_SIZE], dtype=tf.float32)
# use different numbers for the objectness category to break ties in
# order of boxes returned by NMS
rpn_objectness_predictions_with_background = tf.constant([
[[-10, 13],
[10, -10],
[10, -11],
[10, -12]],], dtype=tf.float32)
image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32)
# box_classifier_batch_size is 6, but here we assume that the number of
# actual proposals (not counting zero paddings) is fewer (3).
num_proposals = tf.constant([3], dtype=tf.int32)
proposal_boxes = tf.constant(
[[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[0, 0, 0, 0], # begin paddings
[0, 0, 0, 0],
[0, 0, 0, 0]]], dtype=tf.float32)
# second_stage_localization_loss should only be computed for predictions
# that match groundtruth. For multiple label groundtruth boxes, the loss
# should only be computed once for the label with the smaller index.
refined_box_encodings = tf.constant(
[[[0, 0, 0, 0], [1, 1, -1, -1]],
[[1, 1, -1, -1], [1, 1, 1, 1]],
[[1, 1, -1, -1], [1, 1, 1, 1]],
[[1, 1, -1, -1], [1, 1, 1, 1]],
[[1, 1, -1, -1], [1, 1, 1, 1]],
[[1, 1, -1, -1], [1, 1, 1, 1]]], dtype=tf.float32)
class_predictions_with_background = tf.constant(
[[-100, 100, 100],
[100, -100, -100],
[100, -100, -100],
[0, 0, 0], # begin paddings
[0, 0, 0],
[0, 0, 0]], dtype=tf.float32)
mask_predictions_logits = 20 * tf.ones((batch_size *
model.max_num_proposals,
model.num_classes,
14, 14),
dtype=tf.float32)
groundtruth_boxes_list = [
tf.constant([[0, 0, .5, .5]], dtype=tf.float32)]
# Box contains two ground truth labels.
groundtruth_classes_list = [tf.constant([[1, 1]], dtype=tf.float32)]
# Set all elements of groundtruth mask to 1.0. In this case all proposal
# crops of the groundtruth masks should return a mask that covers the
# entire proposal. Thus, if mask_predictions_logits element values are all
# greater than 20, the loss should be zero.
groundtruth_masks_list = [tf.convert_to_tensor(np.ones((1, 32, 32)),
dtype=tf.float32)]
prediction_dict = {
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'image_shape': image_shape,
'anchors': anchors,
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'proposal_boxes': proposal_boxes,
'num_proposals': num_proposals,
'mask_predictions': mask_predictions_logits
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list)
loss_dict = model.loss(prediction_dict, true_image_shapes)
return (loss_dict['Loss/RPNLoss/localization_loss'],
loss_dict['Loss/RPNLoss/objectness_loss'],
loss_dict['Loss/BoxClassifierLoss/localization_loss'],
loss_dict['Loss/BoxClassifierLoss/classification_loss'],
loss_dict['Loss/BoxClassifierLoss/mask_loss'])
(rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss,
box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g)
self.assertAllClose(rpn_loc_loss, 0)
self.assertAllClose(rpn_obj_loss, 0)
self.assertAllClose(box_loc_loss, 0)
self.assertAllClose(box_cls_loss, 0)
self.assertAllClose(box_mask_loss, 0)
@parameterized.parameters(
{'use_static_shapes': False, 'shared_boxes': False},
{'use_static_shapes': False, 'shared_boxes': True},
{'use_static_shapes': True, 'shared_boxes': False},
{'use_static_shapes': True, 'shared_boxes': True},
)
def test_loss_full_zero_padded_proposals_nonzero_loss_with_two_images(
self, use_static_shapes=False, shared_boxes=False):
batch_size = 2
first_stage_max_proposals = 8
second_stage_batch_size = 6
num_classes = 2
with test_utils.GraphContextOrNone() as g:
model = self._build_model(
is_training=True,
number_of_stages=2,
second_stage_batch_size=second_stage_batch_size,
first_stage_max_proposals=first_stage_max_proposals,
num_classes=num_classes,
use_matmul_crop_and_resize=use_static_shapes,
clip_anchors_to_image=use_static_shapes,
use_static_shapes=use_static_shapes)
def graph_fn(anchors, rpn_box_encodings,
rpn_objectness_predictions_with_background, images,
num_proposals, proposal_boxes, refined_box_encodings,
class_predictions_with_background, groundtruth_boxes,
groundtruth_classes):
"""Function to construct tf graph for the test."""
prediction_dict = {
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'image_shape': tf.shape(images),
'anchors': anchors,
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'proposal_boxes': proposal_boxes,
'num_proposals': num_proposals
}
_, true_image_shapes = model.preprocess(images)
model.provide_groundtruth(tf.unstack(groundtruth_boxes),
tf.unstack(groundtruth_classes))
loss_dict = model.loss(prediction_dict, true_image_shapes)
return (loss_dict['Loss/RPNLoss/localization_loss'],
loss_dict['Loss/RPNLoss/objectness_loss'],
loss_dict['Loss/BoxClassifierLoss/localization_loss'],
loss_dict['Loss/BoxClassifierLoss/classification_loss'])
anchors = np.array(
[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[16, 16, 32, 32]], dtype=np.float32)
rpn_box_encodings = np.zeros(
[batch_size, anchors.shape[1], BOX_CODE_SIZE], dtype=np.float32)
# use different numbers for the objectness category to break ties in
# order of boxes returned by NMS
rpn_objectness_predictions_with_background = np.array(
[[[-10, 13],
[10, -10],
[10, -11],
[10, -12]],
[[-10, 13],
[10, -10],
[10, -11],
[10, -12]]], dtype=np.float32)
images = np.zeros([batch_size, 32, 32, 3], dtype=np.float32)
# box_classifier_batch_size is 6, but here we assume that the number of
# actual proposals (not counting zero paddings) is fewer.
num_proposals = np.array([3, 2], dtype=np.int32)
proposal_boxes = np.array(
[[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[0, 0, 0, 0], # begin paddings
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 16, 16],
[0, 16, 16, 32],
[0, 0, 0, 0], # begin paddings
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]], dtype=np.float32)
refined_box_encodings = np.zeros(
(batch_size * second_stage_batch_size, 1
if shared_boxes else num_classes, BOX_CODE_SIZE),
dtype=np.float32)
class_predictions_with_background = np.array(
[[-10, 10, -10], # first image
[10, -10, -10],
[10, -10, -10],
[0, 0, 0], # begin paddings
[0, 0, 0],
[0, 0, 0],
[-10, -10, 10], # second image
[10, -10, -10],
[0, 0, 0], # begin paddings
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],], dtype=np.float32)
# The first groundtruth box is 4/5 of the anchor size in both directions
# experiencing a loss of:
# 2 * SmoothL1(5 * log(4/5)) / num_proposals
    #   = 2 * (abs(5 * log(4/5)) - .5) / 3
# The second groundtruth box is identical to the prediction and thus
# experiences zero loss.
    # Total average loss is (abs(5 * log(4/5)) - .5) / 3.
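    # As a rough numeric check: 5 * log(4/5) ~= -1.116, which lies in the
    # linear branch of SmoothL1, giving |5 * log(4/5)| - 0.5 ~= 0.616. The
    # first image therefore contributes 2 * 0.616 / 3 ~= 0.410 and the second
    # contributes 0, so the batch average is ~0.205 (see exp_loc_loss below).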
groundtruth_boxes = np.stack([
np.array([[0.05, 0.05, 0.45, 0.45]], dtype=np.float32),
np.array([[0.0, 0.0, 0.5, 0.5]], dtype=np.float32)])
groundtruth_classes = np.stack([np.array([[1, 0]], dtype=np.float32),
np.array([[0, 1]], dtype=np.float32)])
execute_fn = self.execute_cpu
if use_static_shapes:
execute_fn = self.execute
results = execute_fn(graph_fn, [
anchors, rpn_box_encodings, rpn_objectness_predictions_with_background,
images, num_proposals, proposal_boxes, refined_box_encodings,
class_predictions_with_background, groundtruth_boxes,
groundtruth_classes
], graph=g)
exp_loc_loss = (-5 * np.log(.8) - 0.5) / 3.0
self.assertAllClose(results[0], exp_loc_loss, rtol=1e-4, atol=1e-4)
self.assertAllClose(results[1], 0.0)
self.assertAllClose(results[2], exp_loc_loss, rtol=1e-4, atol=1e-4)
self.assertAllClose(results[3], 0.0)
def test_loss_with_hard_mining(self):
with test_utils.GraphContextOrNone() as g:
model = self._build_model(is_training=True,
number_of_stages=2,
second_stage_batch_size=None,
first_stage_max_proposals=6,
hard_mining=True)
batch_size = 1
def graph_fn():
"""A function with TF compute."""
anchors = tf.constant(
[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[16, 16, 32, 32]], dtype=tf.float32)
rpn_box_encodings = tf.zeros(
[batch_size,
anchors.get_shape().as_list()[0],
BOX_CODE_SIZE], dtype=tf.float32)
# use different numbers for the objectness category to break ties in
# order of boxes returned by NMS
rpn_objectness_predictions_with_background = tf.constant(
[[[-10, 13],
[-10, 12],
[10, -11],
[10, -12]]], dtype=tf.float32)
image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32)
# box_classifier_batch_size is 6, but here we assume that the number of
# actual proposals (not counting zero paddings) is fewer (3).
num_proposals = tf.constant([3], dtype=tf.int32)
proposal_boxes = tf.constant(
[[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[0, 0, 0, 0], # begin paddings
[0, 0, 0, 0],
[0, 0, 0, 0]]], dtype=tf.float32)
refined_box_encodings = tf.zeros(
(batch_size * model.max_num_proposals,
model.num_classes,
BOX_CODE_SIZE), dtype=tf.float32)
class_predictions_with_background = tf.constant(
[[-10, 10, -10], # first image
[-10, -10, 10],
[10, -10, -10],
[0, 0, 0], # begin paddings
[0, 0, 0],
[0, 0, 0]], dtype=tf.float32)
# The first groundtruth box is 4/5 of the anchor size in both directions
# experiencing a loss of:
# 2 * SmoothL1(5 * log(4/5)) / num_proposals
      #   = 2 * (abs(5 * log(4/5)) - .5) / 3
      # The second groundtruth box is 46/50 of the anchor size in both
      # directions experiencing a loss of:
      # 2 * SmoothL1(5 * log(46/50)) / num_proposals
      #   = 2 * (.5 * (5 * log(.92))^2) / 3.
# Since the first groundtruth box experiences greater loss, and we have
# set num_hard_examples=1 in the HardMiner, the final localization loss
# corresponds to that of the first groundtruth box.
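      # Numerically: |5 * log(4/5)| ~= 1.116 (linear branch of SmoothL1, loss
      # of ~0.616 per coordinate) versus |5 * log(46/50)| ~= 0.417 (quadratic
      # branch, loss of ~0.087 per coordinate), so the hard miner keeps the
      # first box and the expected localization loss is 2 * 0.616 / 3 ~= 0.410.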
groundtruth_boxes_list = [
tf.constant([[0.05, 0.05, 0.45, 0.45],
[0.02, 0.52, 0.48, 0.98],], dtype=tf.float32)]
groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]],
dtype=tf.float32)]
prediction_dict = {
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'image_shape': image_shape,
'anchors': anchors,
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'proposal_boxes': proposal_boxes,
'num_proposals': num_proposals
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
loss_dict = model.loss(prediction_dict, true_image_shapes)
return (loss_dict['Loss/BoxClassifierLoss/localization_loss'],
loss_dict['Loss/BoxClassifierLoss/classification_loss'])
loc_loss, cls_loss = self.execute_cpu(graph_fn, [], graph=g)
exp_loc_loss = 2 * (-5 * np.log(.8) - 0.5) / 3.0
self.assertAllClose(loc_loss, exp_loc_loss)
self.assertAllClose(cls_loss, 0)
def test_loss_with_hard_mining_and_losses_mask(self):
with test_utils.GraphContextOrNone() as g:
model = self._build_model(is_training=True,
number_of_stages=2,
second_stage_batch_size=None,
first_stage_max_proposals=6,
hard_mining=True)
batch_size = 2
number_of_proposals = 3
def graph_fn():
"""A function with TF compute."""
anchors = tf.constant(
[[0, 0, 16, 16],
[0, 16, 16, 32],
[16, 0, 32, 16],
[16, 16, 32, 32]], dtype=tf.float32)
rpn_box_encodings = tf.zeros(
[batch_size,
anchors.get_shape().as_list()[0],
BOX_CODE_SIZE], dtype=tf.float32)
# use different numbers for the objectness category to break ties in
# order of boxes returned by NMS
rpn_objectness_predictions_with_background = tf.constant(
[[[-10, 13],
[-10, 12],
[10, -11],
[10, -12]],
[[-10, 13],
[-10, 12],
[10, -11],
[10, -12]]], dtype=tf.float32)
image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32)
# box_classifier_batch_size is 6, but here we assume that the number of
# actual proposals (not counting zero paddings) is fewer (3).
num_proposals = tf.constant([number_of_proposals, number_of_proposals],
dtype=tf.int32)
proposal_boxes = tf.constant(
[[[0, 0, 16, 16], # first image
[0, 16, 16, 32],
[16, 0, 32, 16],
[0, 0, 0, 0], # begin paddings
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 16, 16], # second image
[0, 16, 16, 32],
[16, 0, 32, 16],
[0, 0, 0, 0], # begin paddings
[0, 0, 0, 0],
[0, 0, 0, 0]]], dtype=tf.float32)
refined_box_encodings = tf.zeros(
(batch_size * model.max_num_proposals,
model.num_classes,
BOX_CODE_SIZE), dtype=tf.float32)
class_predictions_with_background = tf.constant(
[[-10, 10, -10], # first image
[-10, -10, 10],
[10, -10, -10],
[0, 0, 0], # begin paddings
[0, 0, 0],
[0, 0, 0],
[-10, 10, -10], # second image
[-10, -10, 10],
[10, -10, -10],
[0, 0, 0], # begin paddings
[0, 0, 0],
[0, 0, 0]], dtype=tf.float32)
# The first groundtruth box is 4/5 of the anchor size in both directions
# experiencing a loss of:
# 2 * SmoothL1(5 * log(4/5)) / (num_proposals * batch_size)
      #   = 2 * (abs(5 * log(4/5)) - .5) / 6
      # The second groundtruth box is 46/50 of the anchor size in both
      # directions experiencing a loss of:
      # 2 * SmoothL1(5 * log(46/50)) / (num_proposals * batch_size)
      #   = 2 * (.5 * (5 * log(.92))^2) / 6.
# Since the first groundtruth box experiences greater loss, and we have
# set num_hard_examples=1 in the HardMiner, the final localization loss
# corresponds to that of the first groundtruth box.
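      # Since the second image is marked as not annotated below, only the
      # first image contributes to the box classifier loss; the expected
      # localization loss is 2 * (|5 * log(4/5)| - 0.5) / (3 * 2) ~= 0.205.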
groundtruth_boxes_list = [
tf.constant([[0.05, 0.05, 0.45, 0.45],
[0.02, 0.52, 0.48, 0.98]], dtype=tf.float32),
tf.constant([[0.05, 0.05, 0.45, 0.45],
[0.02, 0.52, 0.48, 0.98]], dtype=tf.float32)]
groundtruth_classes_list = [
tf.constant([[1, 0], [0, 1]], dtype=tf.float32),
tf.constant([[1, 0], [0, 1]], dtype=tf.float32)]
is_annotated_list = [tf.constant(True, dtype=tf.bool),
tf.constant(False, dtype=tf.bool)]
prediction_dict = {
'rpn_box_encodings': rpn_box_encodings,
'rpn_objectness_predictions_with_background':
rpn_objectness_predictions_with_background,
'image_shape': image_shape,
'anchors': anchors,
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'proposal_boxes': proposal_boxes,
'num_proposals': num_proposals
}
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
is_annotated_list=is_annotated_list)
loss_dict = model.loss(prediction_dict, true_image_shapes)
return (loss_dict['Loss/BoxClassifierLoss/localization_loss'],
loss_dict['Loss/BoxClassifierLoss/classification_loss'])
exp_loc_loss = (2 * (-5 * np.log(.8) - 0.5) /
(number_of_proposals * batch_size))
loc_loss, cls_loss = self.execute_cpu(graph_fn, [], graph=g)
self.assertAllClose(loc_loss, exp_loc_loss)
self.assertAllClose(cls_loss, 0)
def test_restore_map_for_classification_ckpt(self):
if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.')
# Define mock tensorflow classification graph and save variables.
test_graph_classification = tf.Graph()
with test_graph_classification.as_default():
image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3])
with tf.variable_scope('mock_model'):
net = slim.conv2d(image, num_outputs=3, kernel_size=1, scope='layer1')
slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session(graph=test_graph_classification) as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
# Create tensorflow detection graph and load variables from
# classification checkpoint.
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model = self._build_model(
is_training=False,
number_of_stages=2, second_stage_batch_size=6)
inputs_shape = (2, 20, 20, 3)
inputs = tf.cast(tf.random_uniform(
inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32)
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
var_map = model.restore_map(fine_tune_checkpoint_type='classification')
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
with self.test_session(graph=test_graph_classification) as sess:
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn(model.first_stage_feature_extractor_scope, var)
self.assertNotIn(model.second_stage_feature_extractor_scope, var)
def test_restore_map_for_detection_ckpt(self):
if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.')
    # Define first detection graph and save variables.
test_graph_detection1 = tf.Graph()
with test_graph_detection1.as_default():
model = self._build_model(
is_training=False,
number_of_stages=2, second_stage_batch_size=6)
inputs_shape = (2, 20, 20, 3)
inputs = tf.cast(tf.random_uniform(
inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32)
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session(graph=test_graph_detection1) as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
# Define second detection graph and restore variables.
test_graph_detection2 = tf.Graph()
with test_graph_detection2.as_default():
model2 = self._build_model(is_training=False,
number_of_stages=2,
second_stage_batch_size=6, num_classes=42)
inputs_shape2 = (2, 20, 20, 3)
inputs2 = tf.cast(tf.random_uniform(
inputs_shape2, minval=0, maxval=255, dtype=tf.int32),
dtype=tf.float32)
preprocessed_inputs2, true_image_shapes = model2.preprocess(inputs2)
prediction_dict2 = model2.predict(preprocessed_inputs2, true_image_shapes)
model2.postprocess(prediction_dict2, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model2.restore_map(fine_tune_checkpoint_type='detection')
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
with self.test_session(graph=test_graph_detection2) as sess:
saver.restore(sess, saved_model_path)
uninitialized_vars_list = sess.run(tf.report_uninitialized_variables())
self.assertIn(six.b('another_variable'), uninitialized_vars_list)
for var in uninitialized_vars_list:
self.assertNotIn(
six.b(model2.first_stage_feature_extractor_scope), var)
self.assertNotIn(
six.b(model2.second_stage_feature_extractor_scope), var)
def test_load_all_det_checkpoint_vars(self):
if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.')
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model = self._build_model(
is_training=False,
number_of_stages=2,
second_stage_batch_size=6,
num_classes=42)
inputs_shape = (2, 20, 20, 3)
inputs = tf.cast(
tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32),
dtype=tf.float32)
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=True)
self.assertIsInstance(var_map, dict)
self.assertIn('another_variable', var_map)
if __name__ == '__main__':
tf.test.main()
| 93,899 | 42.014201 | 104 | py |
models | models-master/research/object_detection/meta_architectures/center_net_meta_arch.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The CenterNet meta architecture as described in the "Objects as Points" paper [1].
[1]: https://arxiv.org/abs/1904.07850
"""
import abc
import collections
import functools
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as cn_assigner
from object_detection.utils import shape_utils
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import tf_version
# Number of channels needed to predict size and offsets.
NUM_OFFSET_CHANNELS = 2
NUM_SIZE_CHANNELS = 2
# Error range for detecting peaks.
PEAK_EPSILON = 1e-6
class CenterNetFeatureExtractor(tf.keras.Model):
"""Base class for feature extractors for the CenterNet meta architecture.
Child classes are expected to override the _output_model property which will
return 1 or more tensors predicted by the feature extractor.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name=None, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Initializes a CenterNet feature extractor.
Args:
name: str, the name used for the underlying keras model.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it. If None or empty, we use 0s.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
If None or empty, we use 1s.
bgr_ordering: bool, if set will change the channel ordering to be in the
[blue, red, green] order.
"""
super(CenterNetFeatureExtractor, self).__init__(name=name)
if channel_means is None or len(channel_means) == 0: # pylint:disable=g-explicit-length-test
channel_means = [0., 0., 0.]
if channel_stds is None or len(channel_stds) == 0: # pylint:disable=g-explicit-length-test
channel_stds = [1., 1., 1.]
self._channel_means = channel_means
self._channel_stds = channel_stds
self._bgr_ordering = bgr_ordering
def preprocess(self, inputs):
"""Converts a batch of unscaled images to a scale suitable for the model.
This method normalizes the image using the given `channel_means` and
    `channel_stds` values at initialization time while optionally flipping
the channel order if `bgr_ordering` is set.
Args:
inputs: a [batch, height, width, channels] float32 tensor
Returns:
outputs: a [batch, height, width, channels] float32 tensor
"""
if self._bgr_ordering:
red, green, blue = tf.unstack(inputs, axis=3)
inputs = tf.stack([blue, green, red], axis=3)
channel_means = tf.reshape(tf.constant(self._channel_means),
[1, 1, 1, -1])
channel_stds = tf.reshape(tf.constant(self._channel_stds),
[1, 1, 1, -1])
return (inputs - channel_means)/channel_stds
def preprocess_reverse(self, preprocessed_inputs):
"""Undo the preprocessing and return the raw image.
This is a convenience function for some algorithms that require access
to the raw inputs.
Args:
preprocessed_inputs: A [batch_size, height, width, channels] float
tensor preprocessed_inputs from the preprocess function.
Returns:
images: A [batch_size, height, width, channels] float tensor with
the preprocessing removed.
"""
channel_means = tf.reshape(tf.constant(self._channel_means),
[1, 1, 1, -1])
channel_stds = tf.reshape(tf.constant(self._channel_stds),
[1, 1, 1, -1])
inputs = (preprocessed_inputs * channel_stds) + channel_means
if self._bgr_ordering:
blue, green, red = tf.unstack(inputs, axis=3)
inputs = tf.stack([red, green, blue], axis=3)
return inputs
@property
@abc.abstractmethod
def out_stride(self):
"""The stride in the output image of the network."""
pass
@property
@abc.abstractmethod
def num_feature_outputs(self):
"""Ther number of feature outputs returned by the feature extractor."""
pass
@property
def classification_backbone(self):
raise NotImplementedError(
'Classification backbone not supported for {}'.format(type(self)))
def make_prediction_net(num_out_channels, kernel_sizes=(3), num_filters=(256),
bias_fill=None, use_depthwise=False, name=None,
unit_height_conv=True):
"""Creates a network to predict the given number of output channels.
This function is intended to make the prediction heads for the CenterNet
meta architecture.
Args:
num_out_channels: Number of output channels.
kernel_sizes: A list representing the sizes of the conv kernel in the
intermediate layer. Note that the length of the list indicates the number
of intermediate conv layers and it must be the same as the length of the
num_filters.
num_filters: A list representing the number of filters in the intermediate
conv layer. Note that the length of the list indicates the number of
intermediate conv layers.
bias_fill: If not None, is used to initialize the bias in the final conv
layer.
use_depthwise: If true, use SeparableConv2D to construct the Sequential
layers instead of Conv2D.
name: Optional name for the prediction net.
unit_height_conv: If True, Conv2Ds have asymmetric kernels with height=1.
Returns:
net: A keras module which when called on an input tensor of size
[batch_size, height, width, num_in_channels] returns an output
of size [batch_size, height, width, num_out_channels]
"""
if isinstance(kernel_sizes, int) and isinstance(num_filters, int):
kernel_sizes = [kernel_sizes]
num_filters = [num_filters]
assert len(kernel_sizes) == len(num_filters)
if use_depthwise:
conv_fn = tf.keras.layers.SeparableConv2D
else:
conv_fn = tf.keras.layers.Conv2D
# We name the convolution operations explicitly because Keras, by default,
# uses different names during training and evaluation. By setting the names
# here, we avoid unexpected pipeline breakage in TF1.
out_conv = tf.keras.layers.Conv2D(
num_out_channels,
kernel_size=1,
name='conv1' if tf_version.is_tf1() else None)
if bias_fill is not None:
out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill)
layers = []
for idx, (kernel_size,
num_filter) in enumerate(zip(kernel_sizes, num_filters)):
layers.append(
conv_fn(
num_filter,
kernel_size=[1, kernel_size] if unit_height_conv else kernel_size,
padding='same',
name='conv2_%d' % idx if tf_version.is_tf1() else None))
layers.append(tf.keras.layers.ReLU())
layers.append(out_conv)
net = tf.keras.Sequential(layers, name=name)
return net
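# A minimal usage sketch (illustrative only, not used elsewhere in this file):
# builds a hypothetical two-channel offset head with a single 3x3, 256-filter
# intermediate conv and applies it to a feature map. The function name
# `_example_offset_head` and its argument are assumptions made for this sketch.
def _example_offset_head(features):
  """Applies an illustrative 2-channel prediction head to `features`."""
  # `features` is assumed to be a [batch, height, width, channels] tensor.
  head = make_prediction_net(
      num_out_channels=NUM_OFFSET_CHANNELS,
      kernel_sizes=(3,),
      num_filters=(256,),
      bias_fill=None,
      use_depthwise=False,
      unit_height_conv=False)
  # The output keeps the input spatial resolution and has NUM_OFFSET_CHANNELS
  # (i.e. 2) channels: a y-offset map and an x-offset map.
  return head(features)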
def _to_float32(x):
return tf.cast(x, tf.float32)
def _get_shape(tensor, num_dims):
assert len(tensor.shape.as_list()) == num_dims
return shape_utils.combined_static_and_dynamic_shape(tensor)
def _flatten_spatial_dimensions(batch_images):
batch_size, height, width, channels = _get_shape(batch_images, 4)
return tf.reshape(batch_images, [batch_size, height * width,
channels])
def _multi_range(limit,
value_repetitions=1,
range_repetitions=1,
dtype=tf.int32):
"""Creates a sequence with optional value duplication and range repetition.
As an example (see the Args section for more details),
_multi_range(limit=2, value_repetitions=3, range_repetitions=4) returns:
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]
Args:
limit: A 0-D Tensor (scalar). Upper limit of sequence, exclusive.
value_repetitions: Integer. The number of times a value in the sequence is
repeated. With value_repetitions=3, the result is [0, 0, 0, 1, 1, 1, ..].
range_repetitions: Integer. The number of times the range is repeated. With
range_repetitions=3, the result is [0, 1, 2, .., 0, 1, 2, ..].
dtype: The type of the elements of the resulting tensor.
Returns:
A 1-D tensor of type `dtype` and size
[`limit` * `value_repetitions` * `range_repetitions`] that contains the
specified range with given repetitions.
"""
return tf.reshape(
tf.tile(
tf.expand_dims(tf.range(limit, dtype=dtype), axis=-1),
multiples=[range_repetitions, value_repetitions]), [-1])
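# Illustrative sketch (not part of the library API): the recurring pattern in
# this file that combines `_multi_range` with `tf.gather_nd` to emulate a
# batched gather, since TF Lite does not support `tf.gather` with
# batch_dims > 0. The helper name and arguments below are hypothetical.
def _example_batched_gather(feature_map, y_indices, x_indices):
  """Gathers feature_map[b, y, x, :] for each (y, x) pair in every batch."""
  batch_size, num_instances = _get_shape(y_indices, 2)
  # Each row of `combined_indices` is a (batch_index, y, x) triple.
  combined_indices = tf.stack([
      _multi_range(batch_size, value_repetitions=num_instances),
      tf.reshape(y_indices, [-1]),
      tf.reshape(x_indices, [-1])
  ], axis=1)
  gathered = tf.gather_nd(feature_map, combined_indices)
  # Restore the [batch, num_instances, channels] layout.
  return tf.reshape(gathered, [batch_size, num_instances, -1])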
def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100,
per_channel=False):
"""Returns the top k scores and their locations in a feature map.
Given a feature map, the top k values (based on activation) are returned. If
`per_channel` is True, the top k values **per channel** are returned. Note
  that when k equals 1, this function uses reduce_max and argmax instead of
  top_k to make the logic more efficient.
The `max_pool_kernel_size` argument allows for selecting local peaks in a
region. This filtering is done per channel, so nothing prevents two values at
the same location to be returned.
Args:
feature_map: [batch, height, width, channels] float32 feature map.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood (independently for each channel).
For example, to make sure no two neighboring values (in the same channel)
are returned, set max_pool_kernel_size=3. If None or 1, will not apply max
pooling.
k: The number of highest scoring locations to return.
per_channel: If True, will return the top k scores and locations per
feature map channel. If False, the top k across the entire feature map
(height x width x channels) are returned.
Returns:
Tuple of
scores: A [batch, N] float32 tensor with scores from the feature map in
descending order. If per_channel is False, N = k. Otherwise,
N = k * channels, and the first k elements correspond to channel 0, the
second k correspond to channel 1, etc.
y_indices: A [batch, N] int tensor with y indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
x_indices: A [batch, N] int tensor with x indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
channel_indices: A [batch, N] int tensor with channel indices of the top k
feature map locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
"""
if not max_pool_kernel_size or max_pool_kernel_size == 1:
feature_map_peaks = feature_map
else:
feature_map_max_pool = tf.nn.max_pool(
feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME')
feature_map_peak_mask = tf.math.abs(
feature_map - feature_map_max_pool) < PEAK_EPSILON
# Zero out everything that is not a peak.
feature_map_peaks = (
feature_map * _to_float32(feature_map_peak_mask))
batch_size, _, width, num_channels = _get_shape(feature_map, 4)
if per_channel:
if k == 1:
feature_map_flattened = tf.reshape(
feature_map_peaks, [batch_size, -1, num_channels])
scores = tf.math.reduce_max(feature_map_flattened, axis=1)
peak_flat_indices = tf.math.argmax(
feature_map_flattened, axis=1, output_type=tf.dtypes.int32)
peak_flat_indices = tf.expand_dims(peak_flat_indices, axis=-1)
else:
# Perform top k over batch and channels.
feature_map_peaks_transposed = tf.transpose(feature_map_peaks,
perm=[0, 3, 1, 2])
feature_map_peaks_transposed = tf.reshape(
feature_map_peaks_transposed, [batch_size, num_channels, -1])
# safe_k will be used whenever there are fewer positions in the heatmap
# than the requested number of locations to score. In that case, all
# positions are returned in sorted order. To ensure consistent shapes for
# downstream ops the outputs are padded with zeros. Safe_k is also
# fine for TPU because TPUs require a fixed input size so the number of
# positions will also be fixed.
safe_k = tf.minimum(k, tf.shape(feature_map_peaks_transposed)[-1])
scores, peak_flat_indices = tf.math.top_k(
feature_map_peaks_transposed, k=safe_k)
scores = tf.pad(scores, [(0, 0), (0, 0), (0, k - safe_k)])
peak_flat_indices = tf.pad(peak_flat_indices,
[(0, 0), (0, 0), (0, k - safe_k)])
scores = tf.ensure_shape(scores, (batch_size, num_channels, k))
peak_flat_indices = tf.ensure_shape(peak_flat_indices,
(batch_size, num_channels, k))
# Convert the indices such that they represent the location in the full
# (flattened) feature map of size [batch, height * width * channels].
channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis]
peak_flat_indices = num_channels * peak_flat_indices + channel_idx
scores = tf.reshape(scores, [batch_size, -1])
peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1])
else:
if k == 1:
feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1])
scores = tf.math.reduce_max(feature_map_peaks_flat, axis=1, keepdims=True)
peak_flat_indices = tf.expand_dims(tf.math.argmax(
feature_map_peaks_flat, axis=1, output_type=tf.dtypes.int32), axis=-1)
else:
feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1])
safe_k = tf.minimum(k, tf.shape(feature_map_peaks_flat)[1])
scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat,
k=safe_k)
# Get x, y and channel indices corresponding to the top indices in the flat
# array.
y_indices, x_indices, channel_indices = (
row_col_channel_indices_from_flattened_indices(
peak_flat_indices, width, num_channels))
return scores, y_indices, x_indices, channel_indices
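# Shape sketch (illustrative numbers only): for a [2, 96, 96, 17] feature map
# with k=100, per_channel=True returns tensors of shape [2, 1700] (100
# candidates per channel, channel 0 first), while per_channel=False returns
# tensors of shape [2, 100] ranked across the entire feature map.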
def prediction_tensors_to_boxes(y_indices, x_indices, height_width_predictions,
offset_predictions):
"""Converts CenterNet class-center, offset and size predictions to boxes.
Args:
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
height_width_predictions: A float tensor of shape [batch_size, height,
width, 2] representing the height and width of a box centered at each
pixel.
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box centered at each pixel. This
helps reduce the error from downsampling.
Returns:
detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the
      raw bounding box coordinates of boxes.
"""
batch_size, num_boxes = _get_shape(y_indices, 2)
_, height, width, _ = _get_shape(height_width_predictions, 4)
height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_boxes),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
new_height_width = tf.gather_nd(height_width_predictions, combined_indices)
new_height_width = tf.reshape(new_height_width, [batch_size, num_boxes, 2])
new_offsets = tf.gather_nd(offset_predictions, combined_indices)
offsets = tf.reshape(new_offsets, [batch_size, num_boxes, 2])
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
height_width = tf.maximum(new_height_width, 0)
heights, widths = tf.unstack(height_width, axis=2)
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
ymin = y_indices + y_offsets - heights / 2.0
xmin = x_indices + x_offsets - widths / 2.0
ymax = y_indices + y_offsets + heights / 2.0
xmax = x_indices + x_offsets + widths / 2.0
ymin = tf.clip_by_value(ymin, 0., height)
xmin = tf.clip_by_value(xmin, 0., width)
ymax = tf.clip_by_value(ymax, 0., height)
xmax = tf.clip_by_value(xmax, 0., width)
boxes = tf.stack([ymin, xmin, ymax, xmax], axis=2)
return boxes
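# Worked example (illustrative numbers only): a peak at (y, x) = (10, 20) with
# predicted height/width (4, 6) and offsets (0.3, -0.2) yields the box
# [10.3 - 2, 19.8 - 3, 10.3 + 2, 19.8 + 3] = [8.3, 16.8, 12.3, 22.8], expressed
# in output-stride coordinates and clipped to the feature map extent.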
def prediction_tensors_to_temporal_offsets(
y_indices, x_indices, offset_predictions):
"""Converts CenterNet temporal offset map predictions to batched format.
This function is similar to the box offset conversion function, as both
temporal offsets and box offsets are size-2 vectors.
Args:
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box's center across adjacent frames.
Returns:
offsets: A tensor of shape [batch_size, num_boxes, 2] holding the
      object temporal offsets of (y, x) dimensions.
"""
batch_size, num_boxes = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_boxes),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
new_offsets = tf.gather_nd(offset_predictions, combined_indices)
offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1])
return offsets
def prediction_tensors_to_keypoint_candidates(keypoint_heatmap_predictions,
keypoint_heatmap_offsets,
keypoint_score_threshold=0.1,
max_pool_kernel_size=1,
max_candidates=20,
keypoint_depths=None):
"""Convert keypoint heatmap predictions and offsets to keypoint candidates.
Args:
keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,
width, num_keypoints] representing the per-keypoint heatmaps.
keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,
width, 2] (or [batch_size, height, width, 2 * num_keypoints] if
'per_keypoint_offset' is set True) representing the per-keypoint offsets.
keypoint_score_threshold: float, the threshold for considering a keypoint a
candidate.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood. For example, to make sure no two
neighboring values for the same keypoint are returned, set
max_pool_kernel_size=3. If None or 1, will not apply any local filtering.
max_candidates: integer, maximum number of keypoint candidates per keypoint
type.
keypoint_depths: (optional) A float tensor of shape [batch_size, height,
width, 1] (or [batch_size, height, width, num_keypoints] if
'per_keypoint_depth' is set True) representing the per-keypoint depths.
Returns:
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the
location of keypoint candidates in [y, x] format (expressed in absolute
coordinates in the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] with the scores for each
keypoint candidate. The scores come directly from the heatmap predictions.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] with the number of candidates for each
keypoint type, as it's possible to filter some candidates due to the score
threshold.
depth_candidates: A tensor of shape [batch_size, max_candidates,
num_keypoints] representing the estimated depth of each keypoint
candidate. Return None if the input keypoint_depths is None.
"""
batch_size, _, _, num_keypoints = _get_shape(keypoint_heatmap_predictions, 4)
# Get x, y and channel indices corresponding to the top indices in the
# keypoint heatmap predictions.
# Note that the top k candidates are produced for **each keypoint type**.
# Might be worth eventually trying top k in the feature map, independent of
# the keypoint type.
keypoint_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(keypoint_heatmap_predictions,
max_pool_kernel_size=max_pool_kernel_size,
k=max_candidates,
per_channel=True))
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
_, num_indices = _get_shape(y_indices, 2)
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets,
combined_indices)
selected_offsets = tf.reshape(selected_offsets_flat,
[batch_size, num_indices, -1])
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
_, _, num_channels = _get_shape(selected_offsets, 3)
if num_channels > 2:
# Offsets are per keypoint and the last dimension of selected_offsets
# contains all those offsets, so reshape the offsets to make sure that the
# last dimension contains (y_offset, x_offset) for a single keypoint.
reshaped_offsets = tf.reshape(selected_offsets,
[batch_size, num_indices, -1, 2])
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that. In this
# case, channel_indices indicates which keypoint to use the offset from.
channel_combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
_multi_range(num_indices, range_repetitions=batch_size),
tf.reshape(channel_indices, [-1])
], axis=1)
offsets = tf.gather_nd(reshaped_offsets, channel_combined_indices)
offsets = tf.reshape(offsets, [batch_size, num_indices, -1])
else:
offsets = selected_offsets
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
keypoint_candidates = tf.stack([y_indices + y_offsets,
x_indices + x_offsets], axis=2)
keypoint_candidates = tf.reshape(
keypoint_candidates,
[batch_size, num_keypoints, max_candidates, 2])
keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3])
keypoint_scores = tf.reshape(
keypoint_scores,
[batch_size, num_keypoints, max_candidates])
keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1])
num_candidates = tf.reduce_sum(
tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1)
depth_candidates = None
if keypoint_depths is not None:
selected_depth_flat = tf.gather_nd(keypoint_depths, combined_indices)
selected_depth = tf.reshape(selected_depth_flat,
[batch_size, num_indices, -1])
_, _, num_depth_channels = _get_shape(selected_depth, 3)
if num_depth_channels > 1:
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
_multi_range(num_indices, range_repetitions=batch_size),
tf.reshape(channel_indices, [-1])
], axis=1)
depth = tf.gather_nd(selected_depth, combined_indices)
depth = tf.reshape(depth, [batch_size, num_indices, -1])
else:
depth = selected_depth
depth_candidates = tf.reshape(depth,
[batch_size, num_keypoints, max_candidates])
depth_candidates = tf.transpose(depth_candidates, [0, 2, 1])
return keypoint_candidates, keypoint_scores, num_candidates, depth_candidates
def argmax_feature_map_locations(feature_map):
"""Returns the peak locations in the feature map."""
batch_size, _, width, num_channels = _get_shape(feature_map, 4)
feature_map_flattened = tf.reshape(
feature_map, [batch_size, -1, num_channels])
peak_flat_indices = tf.math.argmax(
feature_map_flattened, axis=1, output_type=tf.dtypes.int32)
# Get x and y indices corresponding to the top indices in the flat array.
y_indices, x_indices = (
row_col_indices_from_flattened_indices(peak_flat_indices, width))
channel_indices = tf.tile(
tf.range(num_channels)[tf.newaxis, :], [batch_size, 1])
return y_indices, x_indices, channel_indices
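# Note: for a [batch, height, width, channels] input, each tensor returned by
# `argmax_feature_map_locations` has shape [batch, channels], i.e. one peak
# location per channel.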
def prediction_tensors_to_single_instance_kpts(
keypoint_heatmap_predictions,
keypoint_heatmap_offsets,
keypoint_score_heatmap=None):
"""Convert keypoint heatmap predictions and offsets to keypoint candidates.
Args:
keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,
width, num_keypoints] representing the per-keypoint heatmaps which is
used for finding the best keypoint candidate locations.
keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,
width, 2] (or [batch_size, height, width, 2 * num_keypoints] if
'per_keypoint_offset' is set True) representing the per-keypoint offsets.
keypoint_score_heatmap: (optional) A float tensor of shape [batch_size,
height, width, num_keypoints] representing the heatmap which is used for
reporting the confidence scores. If not provided, then the values in the
keypoint_heatmap_predictions will be used.
Returns:
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the
location of keypoint candidates in [y, x] format (expressed in absolute
coordinates in the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] with the scores for each
keypoint candidate. The scores come directly from the heatmap predictions.
"""
batch_size, _, _, num_keypoints = _get_shape(
keypoint_heatmap_predictions, 4)
# Get x, y and channel indices corresponding to the top indices in the
# keypoint heatmap predictions.
y_indices, x_indices, channel_indices = argmax_feature_map_locations(
keypoint_heatmap_predictions)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
_, num_keypoints = _get_shape(y_indices, 2)
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_keypoints),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1]),
], axis=1)
# shape: [num_keypoints, num_keypoints * 2]
selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets,
combined_indices)
# shape: [num_keypoints, num_keypoints, 2].
selected_offsets_flat = tf.reshape(
selected_offsets_flat, [num_keypoints, num_keypoints, -1])
# shape: [num_keypoints].
channel_indices = tf.keras.backend.flatten(channel_indices)
# shape: [num_keypoints, 2].
retrieve_indices = tf.stack([channel_indices, channel_indices], axis=1)
# shape: [num_keypoints, 2]
selected_offsets = tf.gather_nd(selected_offsets_flat, retrieve_indices)
y_offsets, x_offsets = tf.unstack(selected_offsets, axis=1)
keypoint_candidates = tf.stack([
tf.cast(y_indices, dtype=tf.float32) + tf.expand_dims(y_offsets, axis=0),
tf.cast(x_indices, dtype=tf.float32) + tf.expand_dims(x_offsets, axis=0)
], axis=2)
keypoint_candidates = tf.expand_dims(keypoint_candidates, axis=0)
# Append the channel indices back to retrieve the keypoint scores from the
# heatmap.
combined_indices = tf.concat(
[combined_indices, tf.expand_dims(channel_indices, axis=-1)], axis=1)
if keypoint_score_heatmap is None:
keypoint_scores = tf.gather_nd(
keypoint_heatmap_predictions, combined_indices)
else:
keypoint_scores = tf.gather_nd(keypoint_score_heatmap, combined_indices)
keypoint_scores = tf.expand_dims(
tf.expand_dims(keypoint_scores, axis=0), axis=0)
return keypoint_candidates, keypoint_scores
def _score_to_distance_map(y_grid, x_grid, heatmap, points_y, points_x,
score_distance_offset):
"""Rescores heatmap using the distance information.
Rescore the heatmap scores using the formula:
score / (d + score_distance_offset), where the d is the distance from each
pixel location to the target point location.
Args:
y_grid: A float tensor with shape [height, width] representing the
y-coordinate of each pixel grid.
x_grid: A float tensor with shape [height, width] representing the
x-coordinate of each pixel grid.
heatmap: A float tensor with shape [1, height, width, channel]
representing the heatmap to be rescored.
points_y: A float tensor with shape [channel] representing the y
coordinates of the target points for each channel.
points_x: A float tensor with shape [channel] representing the x
coordinates of the target points for each channel.
score_distance_offset: A constant used in the above formula.
Returns:
A float tensor with shape [1, height, width, channel] representing the
rescored heatmap.
"""
y_diff = y_grid[:, :, tf.newaxis] - points_y
x_diff = x_grid[:, :, tf.newaxis] - points_x
distance = tf.math.sqrt(y_diff**2 + x_diff**2)
return tf.math.divide(heatmap, distance + score_distance_offset)
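# Worked example (illustrative numbers only): a pixel with heatmap score 0.9
# that is 2.0 pixels away from the target point, with
# score_distance_offset=1.0, is rescored to 0.9 / (2.0 + 1.0) = 0.3.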
def prediction_to_single_instance_keypoints(
object_heatmap,
keypoint_heatmap,
keypoint_offset,
keypoint_regression,
kp_params,
keypoint_depths=None):
"""Postprocess function to predict single instance keypoints.
This is a simplified postprocessing function based on the assumption that
there is only one instance in the image. If there are multiple instances in
the image, the model prefers to predict the one that is closest to the image
center. Here is a high-level description of what this function does:
1) Object heatmap re-weighted by the distance between each pixel to the
image center is used to determine the instance center.
2) Regressed keypoint locations are retrieved from the instance center. The
Gaussian kernel is applied to the regressed keypoint locations to
re-weight the keypoint heatmap. This is to select the keypoints that are
associated with the center instance without using top_k op.
3) The keypoint locations are computed by the re-weighted keypoint heatmap
and the keypoint offset.
Args:
object_heatmap: A float tensor of shape [1, height, width, 1] representing
      the heatmap of the class.
keypoint_heatmap: A float tensor of shape [1, height, width, num_keypoints]
representing the per-keypoint heatmaps.
keypoint_offset: A float tensor of shape [1, height, width, 2] (or [1,
height, width, 2 * num_keypoints] if 'per_keypoint_offset' is set True)
representing the per-keypoint offsets.
keypoint_regression: A float tensor of shape [1, height, width, 2 *
num_keypoints] representing the joint regression prediction.
kp_params: A `KeypointEstimationParams` object with parameters for a single
keypoint class.
keypoint_depths: (optional) A float tensor of shape [batch_size, height,
width, 1] (or [batch_size, height, width, num_keypoints] if
'per_keypoint_depth' is set True) representing the per-keypoint depths.
Returns:
    A tuple of three elements:
      keypoint_candidates: A float tensor with shape [1, 1, num_keypoints, 2]
        representing the yx-coordinates of the keypoints in the output feature
        map space.
      keypoint_scores: A float tensor with shape [1, 1, num_keypoints]
        representing the keypoint prediction scores.
      keypoint_depths: Always None in this simplified path, since the keypoint
        depth prediction logic is not applied here.
  Raises:
    ValueError: if the candidate_ranking_mode specified in kp_params is not
      supported by this postprocessing function.
"""
# TODO(yuhuic): add the keypoint depth prediction logics in the browser
# postprocessing back.
del keypoint_depths
num_keypoints = len(kp_params.keypoint_std_dev)
batch_size, height, width, _ = _get_shape(keypoint_heatmap, 4)
# Create the image center location.
image_center_y = tf.convert_to_tensor([0.5 * height], dtype=tf.float32)
image_center_x = tf.convert_to_tensor([0.5 * width], dtype=tf.float32)
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height, width)
  # Rescore the object heatmap by the distance to the image center.
object_heatmap = _score_to_distance_map(
y_grid, x_grid, object_heatmap, image_center_y,
image_center_x, kp_params.score_distance_offset)
# Pick the highest score and location of the weighted object heatmap.
y_indices, x_indices, _ = argmax_feature_map_locations(object_heatmap)
_, num_indices = _get_shape(y_indices, 2)
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_indices),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
# Select the regression vectors from the object center.
selected_regression_flat = tf.gather_nd(keypoint_regression, combined_indices)
# shape: [num_keypoints, 2]
regression_offsets = tf.reshape(selected_regression_flat, [num_keypoints, -1])
(y_reg, x_reg) = tf.unstack(regression_offsets, axis=1)
y_regressed = tf.cast(y_indices, dtype=tf.float32) + y_reg
x_regressed = tf.cast(x_indices, dtype=tf.float32) + x_reg
if kp_params.candidate_ranking_mode == 'score_distance_ratio':
reweighted_keypoint_heatmap = _score_to_distance_map(
y_grid, x_grid, keypoint_heatmap, y_regressed, x_regressed,
kp_params.score_distance_offset)
else:
raise ValueError('Unsupported candidate_ranking_mode: %s' %
kp_params.candidate_ranking_mode)
# Get the keypoint locations/scores:
# keypoint_candidates: [1, 1, num_keypoints, 2]
# keypoint_scores: [1, 1, num_keypoints]
# depth_candidates: [1, 1, num_keypoints]
(keypoint_candidates, keypoint_scores
) = prediction_tensors_to_single_instance_kpts(
reweighted_keypoint_heatmap,
keypoint_offset,
keypoint_score_heatmap=keypoint_heatmap)
return keypoint_candidates, keypoint_scores, None
def _gaussian_weighted_map_const_multi(
y_grid, x_grid, heatmap, points_y, points_x, boxes,
gaussian_denom_ratio):
"""Rescores heatmap using the distance information.
The function is called when the candidate_ranking_mode in the
KeypointEstimationParams is set to be 'gaussian_weighted_const'. The
keypoint candidates are ranked using the formula:
heatmap_score * exp((-distances^2) / (gaussian_denom))
where 'gaussian_denom' is determined by:
min(output_feature_height, output_feature_width) * gaussian_denom_ratio
the 'distances' are the distances between the grid coordinates and the target
points.
Note that the postfix 'const' refers to the fact that the denominator is a
constant given the input image size, not scaled by the size of each of the
instances.
Args:
y_grid: A float tensor with shape [height, width] representing the
y-coordinate of each pixel grid.
x_grid: A float tensor with shape [height, width] representing the
x-coordinate of each pixel grid.
heatmap: A float tensor with shape [height, width, num_keypoints]
representing the heatmap to be rescored.
points_y: A float tensor with shape [num_instances, num_keypoints]
representing the y coordinates of the target points for each channel.
points_x: A float tensor with shape [num_instances, num_keypoints]
representing the x coordinates of the target points for each channel.
boxes: A tensor of shape [num_instances, 4] with predicted bounding boxes
for each instance, expressed in the output coordinate frame.
gaussian_denom_ratio: A constant used in the above formula that determines
the denominator of the Gaussian kernel.
Returns:
    A float tensor with shape [height, width, num_instances, num_keypoints]
    representing the rescored heatmap with an explicit per-instance dimension.
"""
num_instances, _ = _get_shape(boxes, 2)
height, width, num_keypoints = _get_shape(heatmap, 3)
# [height, width, num_instances, num_keypoints].
# Note that we intentionally avoid using tf.newaxis as TfLite converter
# doesn't like it.
y_diff = (
tf.reshape(y_grid, [height, width, 1, 1]) -
tf.reshape(points_y, [1, 1, num_instances, num_keypoints]))
x_diff = (
tf.reshape(x_grid, [height, width, 1, 1]) -
tf.reshape(points_x, [1, 1, num_instances, num_keypoints]))
distance_square = y_diff * y_diff + x_diff * x_diff
y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=1)
# Make the mask with all 1.0 in the box regions.
# Shape: [height, width, num_instances]
in_boxes = tf.math.logical_and(
tf.math.logical_and(
tf.reshape(y_grid, [height, width, 1]) >= tf.reshape(
y_min, [1, 1, num_instances]),
tf.reshape(y_grid, [height, width, 1]) < tf.reshape(
y_max, [1, 1, num_instances])),
tf.math.logical_and(
tf.reshape(x_grid, [height, width, 1]) >= tf.reshape(
x_min, [1, 1, num_instances]),
tf.reshape(x_grid, [height, width, 1]) < tf.reshape(
x_max, [1, 1, num_instances])))
in_boxes = tf.cast(in_boxes, dtype=tf.float32)
gaussian_denom = tf.cast(
tf.minimum(height, width), dtype=tf.float32) * gaussian_denom_ratio
# shape: [height, width, num_instances, num_keypoints]
gaussian_map = tf.exp((-1 * distance_square) / gaussian_denom)
return tf.expand_dims(heatmap, axis=2) * gaussian_map * tf.reshape(
in_boxes, [height, width, num_instances, 1])
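# Shape sketch (illustrative, hypothetical sizes): the rescored heatmap gains
# a per-instance dimension. With 2 instances and 17 keypoints on a 32x32
# output:
#   y_grid, x_grid = ta_utils.image_shape_to_grids(32, 32)  # each [32, 32]
#   heatmap: [32, 32, 17]; points_y, points_x: [2, 17]; boxes: [2, 4]
#   rescored = _gaussian_weighted_map_const_multi(
#       y_grid, x_grid, heatmap, points_y, points_x, boxes,
#       gaussian_denom_ratio=0.1)
#   # rescored has shape [32, 32, 2, 17].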
def prediction_tensors_to_multi_instance_kpts(
keypoint_heatmap_predictions,
keypoint_heatmap_offsets,
keypoint_score_heatmap=None):
"""Converts keypoint heatmap predictions and offsets to keypoint candidates.
This function is similar to the 'prediction_tensors_to_single_instance_kpts'
function except that the input keypoint_heatmap_predictions is prepared to
have an additional 'num_instances' dimension for multi-instance prediction.
Args:
keypoint_heatmap_predictions: A float tensor of shape [height,
width, num_instances, num_keypoints] representing the per-keypoint and
per-instance heatmaps which is used for finding the best keypoint
candidate locations.
keypoint_heatmap_offsets: A float tensor of shape [height,
width, 2 * num_keypoints] representing the per-keypoint offsets.
keypoint_score_heatmap: (optional) A float tensor of shape [height, width,
num_keypoints] representing the heatmap which is used for reporting the
confidence scores. If not provided, then the values in the
keypoint_heatmap_predictions will be used.
Returns:
keypoint_candidates: A tensor of shape
[1, max_candidates, num_keypoints, 2] holding the
location of keypoint candidates in [y, x] format (expressed in absolute
coordinates in the output coordinate frame).
keypoint_scores: A float tensor of shape
[1, max_candidates, num_keypoints] with the scores for each
keypoint candidate. The scores come directly from the heatmap predictions.
"""
height, width, num_instances, num_keypoints = _get_shape(
keypoint_heatmap_predictions, 4)
# [height * width, num_instances * num_keypoints].
feature_map_flattened = tf.reshape(
keypoint_heatmap_predictions,
[-1, num_instances * num_keypoints])
# [num_instances * num_keypoints].
peak_flat_indices = tf.math.argmax(
feature_map_flattened, axis=0, output_type=tf.dtypes.int32)
# Get x and y indices corresponding to the top indices in the flat array.
y_indices, x_indices = (
row_col_indices_from_flattened_indices(peak_flat_indices, width))
# [num_instances * num_keypoints].
y_indices = tf.reshape(y_indices, [-1])
x_indices = tf.reshape(x_indices, [-1])
# Prepare the indices to gather the offsets from the keypoint_heatmap_offsets.
kpts_idx = _multi_range(
limit=num_keypoints, value_repetitions=1,
range_repetitions=num_instances)
combined_indices = tf.stack([
y_indices,
x_indices,
kpts_idx
], axis=1)
keypoint_heatmap_offsets = tf.reshape(
keypoint_heatmap_offsets, [height, width, num_keypoints, 2])
  # Retrieve the keypoint offsets. Shape:
  # [num_instances * num_keypoints, 2].
selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets,
combined_indices)
y_offsets, x_offsets = tf.unstack(selected_offsets_flat, axis=1)
keypoint_candidates = tf.stack([
tf.cast(y_indices, dtype=tf.float32) + tf.expand_dims(y_offsets, axis=0),
tf.cast(x_indices, dtype=tf.float32) + tf.expand_dims(x_offsets, axis=0)
], axis=2)
keypoint_candidates = tf.reshape(
keypoint_candidates, [num_instances, num_keypoints, 2])
if keypoint_score_heatmap is None:
keypoint_scores = tf.gather_nd(
tf.reduce_max(keypoint_heatmap_predictions, axis=2), combined_indices)
else:
keypoint_scores = tf.gather_nd(keypoint_score_heatmap, combined_indices)
return tf.expand_dims(keypoint_candidates, axis=0), tf.reshape(
keypoint_scores, [1, num_instances, num_keypoints])
def prediction_to_keypoints_argmax(
prediction_dict,
object_y_indices,
object_x_indices,
boxes,
task_name,
kp_params):
"""Postprocess function to predict multi instance keypoints with argmax op.
This is a different implementation of the original keypoint postprocessing
function such that it avoids using topk op (replaced by argmax) as it runs
much slower in the browser. Note that in this function, we assume the
batch_size to be 1 to avoid using 5D tensors which cause issues when
converting to the TfLite model.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain keypoint prediction
feature maps for each keypoint task.
    object_y_indices: An integer tensor of shape [batch_size, max_instances]
      representing the y location indices of the object centers.
    object_x_indices: An integer tensor of shape [batch_size, max_instances]
      representing the x location indices of the object centers.
boxes: A tensor of shape [batch_size, num_instances, 4] with predicted
bounding boxes for each instance, expressed in the output coordinate
frame.
task_name: string, the name of the task this namedtuple corresponds to.
      Note that it should be a unique identifier of the task.
kp_params: A `KeypointEstimationParams` object with parameters for a single
keypoint class.
Returns:
A tuple of two tensors:
keypoint_candidates: A float tensor with shape [batch_size,
num_instances, num_keypoints, 2] representing the yx-coordinates of
the keypoints in the output feature map space.
keypoint_scores: A float tensor with shape [batch_size, num_instances,
num_keypoints] representing the keypoint prediction scores.
Raises:
ValueError: if the candidate_ranking_mode is not supported.
"""
keypoint_heatmap = tf.squeeze(tf.nn.sigmoid(prediction_dict[
get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]), axis=0)
keypoint_offset = tf.squeeze(prediction_dict[
get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1], axis=0)
keypoint_regression = tf.squeeze(prediction_dict[
get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1], axis=0)
height, width, num_keypoints = _get_shape(keypoint_heatmap, 3)
# Create the y,x grids: [height, width]
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height, width)
# Prepare the indices to retrieve the information from object centers.
num_instances = _get_shape(object_y_indices, 2)[1]
combined_obj_indices = tf.stack([
tf.reshape(object_y_indices, [-1]),
tf.reshape(object_x_indices, [-1])
], axis=1)
# Select the regression vectors from the object center.
selected_regression_flat = tf.gather_nd(
keypoint_regression, combined_obj_indices)
selected_regression = tf.reshape(
selected_regression_flat, [num_instances, num_keypoints, 2])
(y_reg, x_reg) = tf.unstack(selected_regression, axis=2)
# shape: [num_instances, num_keypoints].
y_regressed = tf.cast(
tf.reshape(object_y_indices, [num_instances, 1]),
dtype=tf.float32) + y_reg
x_regressed = tf.cast(
tf.reshape(object_x_indices, [num_instances, 1]),
dtype=tf.float32) + x_reg
if kp_params.candidate_ranking_mode == 'gaussian_weighted_const':
rescored_heatmap = _gaussian_weighted_map_const_multi(
y_grid, x_grid, keypoint_heatmap, y_regressed, x_regressed,
tf.squeeze(boxes, axis=0), kp_params.gaussian_denom_ratio)
# shape: [height, width, num_keypoints].
keypoint_score_heatmap = tf.math.reduce_max(rescored_heatmap, axis=2)
else:
raise ValueError(
'Unsupported ranking mode in the multipose no topk method: %s' %
kp_params.candidate_ranking_mode)
(keypoint_candidates,
keypoint_scores) = prediction_tensors_to_multi_instance_kpts(
keypoint_heatmap_predictions=rescored_heatmap,
keypoint_heatmap_offsets=keypoint_offset,
keypoint_score_heatmap=keypoint_score_heatmap)
return keypoint_candidates, keypoint_scores
def regressed_keypoints_at_object_centers(regressed_keypoint_predictions,
y_indices, x_indices):
"""Returns the regressed keypoints at specified object centers.
The original keypoint predictions are regressed relative to each feature map
location. The returned keypoints are expressed in absolute coordinates in the
output frame (i.e. the center offsets are added to each individual regressed
set of keypoints).
Args:
regressed_keypoint_predictions: A float tensor of shape
[batch_size, height, width, 2 * num_keypoints] holding regressed
keypoints. The last dimension has keypoint coordinates ordered as follows:
[y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where
regressed keypoints are gathered at the provided locations, and converted
to absolute coordinates in the output coordinate frame.
"""
batch_size, num_instances = _get_shape(y_indices, 2)
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(batch_size, value_repetitions=num_instances),
tf.reshape(y_indices, [-1]),
tf.reshape(x_indices, [-1])
], axis=1)
relative_regressed_keypoints = tf.gather_nd(regressed_keypoint_predictions,
combined_indices)
relative_regressed_keypoints = tf.reshape(
relative_regressed_keypoints,
[batch_size, num_instances, -1, 2])
relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack(
relative_regressed_keypoints, axis=3)
y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1))
x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1))
absolute_regressed_keypoints = tf.stack(
[y_indices + relative_regressed_keypoints_y,
x_indices + relative_regressed_keypoints_x],
axis=3)
return tf.reshape(absolute_regressed_keypoints,
[batch_size, num_instances, -1])
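# Illustrative sketch (hypothetical values): a single keypoint regressed as
# (dy, dx) = (1.0, -2.0) everywhere, gathered at an object center located at
# (y, x) = (3, 5), yields the absolute keypoint (4.0, 3.0):
#   preds = tf.tile(tf.constant([[[[1.0, -2.0]]]]), [1, 8, 8, 1])
#   regressed_keypoints_at_object_centers(
#       preds, tf.constant([[3]]), tf.constant([[5]]))
#   # -> [[[4.0, 3.0]]] with shape [batch_size=1, num_instances=1, 2]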
def sdr_scaled_ranking_score(
keypoint_scores, distances, bboxes, score_distance_multiplier):
"""Score-to-distance-ratio method to rank keypoint candidates.
This corresponds to the ranking method: 'score_scaled_distance_ratio'. The
keypoint candidates are ranked using the formula:
ranking_score = score / (distance + offset)
where 'score' is the keypoint heatmap scores, 'distance' is the distance
between the heatmap peak location and the regressed joint location,
'offset' is a function of the predicted bounding box:
offset = max(bbox height, bbox width) * score_distance_multiplier
The ranking score is used to find the best keypoint candidate for snapping
regressed joints.
Args:
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the scores for
keypoint candidates.
distances: A float tensor of shape
[batch_size, num_instances, max_candidates, num_keypoints] indicating the
distances between the keypoint candidates and the joint regression
      locations of each instance.
    bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted
      bounding boxes for each instance, expressed in the output coordinate
      frame.
score_distance_multiplier: A scalar used to multiply the bounding box size
to be the offset in the score-to-distance-ratio formula.
Returns:
A float tensor of shape [batch_size, num_instances, max_candidates,
num_keypoints] representing the ranking scores of each keypoint candidates.
"""
# Get ymin, xmin, ymax, xmax bounding box coordinates.
# Shape: [batch_size, num_instances]
ymin, xmin, ymax, xmax = tf.unstack(bboxes, axis=2)
# Shape: [batch_size, num_instances].
offsets = tf.math.maximum(
ymax - ymin, xmax - xmin) * score_distance_multiplier
# Shape: [batch_size, num_instances, max_candidates, num_keypoints]
ranking_scores = keypoint_scores[:, tf.newaxis, :, :] / (
distances + offsets[:, :, tf.newaxis, tf.newaxis])
return ranking_scores
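# Illustrative numbers (hypothetical): a candidate with heatmap score 0.8 that
# lies 2.0 pixels from the regressed joint of an instance whose box has max
# side 10 gets, with score_distance_multiplier=0.1, an offset of 1.0 and thus
# a ranking score of 0.8 / (2.0 + 1.0) ~= 0.267. Expected shapes:
#   keypoint_scores: [batch, max_candidates, num_keypoints]
#   distances: [batch, num_instances, max_candidates, num_keypoints]
#   bboxes: [batch, num_instances, 4]
#   ranking = sdr_scaled_ranking_score(
#       keypoint_scores, distances, bboxes, score_distance_multiplier=0.1)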
def gaussian_weighted_score(
keypoint_scores, distances, keypoint_std_dev, bboxes):
"""Gaussian weighted method to rank keypoint candidates.
This corresponds to the ranking method: 'gaussian_weighted'. The
keypoint candidates are ranked using the formula:
score * exp((-distances^2) / (2 * sigma^2))
  where 'score' is the keypoint heatmap score, 'distances' is the distance
  between the heatmap peak location and the regressed joint location, and
  'sigma' is a Gaussian standard deviation derived from the instance box size
  and scaled by the per-keypoint 'keypoint_std_dev'.
The ranking score is used to find the best keypoint candidate for snapping
regressed joints.
Args:
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the scores for
keypoint candidates.
distances: A float tensor of shape
[batch_size, num_instances, max_candidates, num_keypoints] indicating the
distances between the keypoint candidates and the joint regression
      locations of each instance.
    keypoint_std_dev: A list of floats representing the standard deviation of
      the Gaussian kernel used to generate the keypoint heatmap. It provides
      the flexibility of using a different Gaussian kernel size for each
      keypoint class.
    bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted
      bounding boxes for each instance, expressed in the output coordinate
      frame.
Returns:
A float tensor of shape [batch_size, num_instances, max_candidates,
num_keypoints] representing the ranking scores of each keypoint candidates.
"""
# Get ymin, xmin, ymax, xmax bounding box coordinates.
# Shape: [batch_size, num_instances]
ymin, xmin, ymax, xmax = tf.unstack(bboxes, axis=2)
# shape: [num_keypoints]
keypoint_std_dev = tf.constant(keypoint_std_dev)
# shape: [batch_size, num_instances]
sigma = cn_assigner._compute_std_dev_from_box_size( # pylint: disable=protected-access
ymax - ymin, xmax - xmin, min_overlap=0.7)
# shape: [batch_size, num_instances, num_keypoints]
sigma = keypoint_std_dev[tf.newaxis, tf.newaxis, :] * sigma[:, :, tf.newaxis]
(_, _, max_candidates, _) = _get_shape(distances, 4)
# shape: [batch_size, num_instances, max_candidates, num_keypoints]
sigma = tf.tile(
sigma[:, :, tf.newaxis, :], multiples=[1, 1, max_candidates, 1])
gaussian_map = tf.exp((-1 * distances * distances) / (2 * sigma * sigma))
return keypoint_scores[:, tf.newaxis, :, :] * gaussian_map
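# Shape sketch (illustrative, hypothetical sizes): with batch=1, 3 instances,
# 4 candidates and 17 keypoints, the candidate scores are broadcast against
# the per-instance Gaussian weights:
#   keypoint_scores: [1, 4, 17]; distances: [1, 3, 4, 17]; bboxes: [1, 3, 4]
#   keypoint_std_dev: a Python list of 17 floats
#   ranking = gaussian_weighted_score(
#       keypoint_scores, distances, keypoint_std_dev, bboxes)
#   # ranking has shape [1, 3, 4, 17].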
def refine_keypoints(regressed_keypoints,
keypoint_candidates,
keypoint_scores,
num_keypoint_candidates,
bboxes=None,
unmatched_keypoint_score=0.1,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance',
score_distance_offset=1e-6,
keypoint_depth_candidates=None,
keypoint_score_threshold=0.1,
score_distance_multiplier=0.1,
keypoint_std_dev=None):
"""Refines regressed keypoints by snapping to the nearest candidate keypoints.
The initial regressed keypoints represent a full set of keypoints regressed
from the centers of the objects. The keypoint candidates are estimated
independently from heatmaps, and are not associated with any object instances.
This function refines the regressed keypoints by "snapping" to the
nearest/highest score/highest score-distance ratio (depending on the
candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose").
If no candidates are nearby, the regressed keypoint remains unchanged.
In order to snap a regressed keypoint to a candidate keypoint, the following
must be satisfied:
- the candidate keypoint must be of the same type as the regressed keypoint
  - the candidate keypoint must not lie outside the predicted boxes (or the
    boxes which enclose the regressed keypoints for the instance if `bboxes` is
    not provided). Note that the box is scaled by `box_scale` in height and
    width, to provide some margin around the keypoints
- the distance to the closest candidate keypoint cannot exceed
candidate_search_scale * max(height, width), where height and width refer to
the bounding box for the instance.
Note that the same candidate keypoint is allowed to snap to regressed
  keypoints in different instances.
Args:
regressed_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the initial regressed
keypoints.
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the location of
keypoint candidates in [y, x] format (expressed in absolute coordinates in
the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the scores for
keypoint candidates.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] indicating the number of valid candidates for
each keypoint type, as there may be padding (dim 1) of
`keypoint_candidates` and `keypoint_scores`.
bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted
bounding boxes for each instance, expressed in the output coordinate
frame. If not provided, boxes will be computed from regressed keypoints.
unmatched_keypoint_score: float, the default score to use for regressed
keypoints that are not successfully snapped to a nearby candidate.
box_scale: float, the multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints) for
an instance. This scale is typically larger than 1.0 when not providing
`bboxes`.
candidate_search_scale: float, the scale parameter that multiplies the
largest dimension of a bounding box. The resulting distance becomes a
search radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: A string as one of ['min_distance',
'score_distance_ratio', 'score_scaled_distance_ratio',
'gaussian_weighted'] indicating how to select the candidate. If invalid
      value is provided, a ValueError will be raised.
score_distance_offset: The distance offset to apply in the denominator when
candidate_ranking_mode is 'score_distance_ratio'. The metric to maximize
in this scenario is score / (distance + score_distance_offset). Larger
values of score_distance_offset make the keypoint score gain more relative
importance.
keypoint_depth_candidates: (optional) A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the depths for
keypoint candidates.
keypoint_score_threshold: float, The heatmap score threshold for
a keypoint to become a valid candidate.
score_distance_multiplier: A scalar used to multiply the bounding box size
to be the offset in the score-to-distance-ratio formula.
    keypoint_std_dev: A list of floats representing the standard deviation of
      the Gaussian kernel used to rank the keypoint candidates. It offers the
      flexibility of using a different Gaussian kernel size for each keypoint
      class. Only applicable when the candidate_ranking_mode is
      'gaussian_weighted'.
Returns:
A tuple with:
refined_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the final, refined
keypoints.
refined_scores: A float tensor of shape
[batch_size, num_instances, num_keypoints] with scores associated with all
instances and keypoints in `refined_keypoints`.
Raises:
    ValueError: if the provided candidate_ranking_mode is not one of
      ['min_distance', 'score_distance_ratio', 'score_scaled_distance_ratio',
      'gaussian_weighted'].
"""
batch_size, num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(regressed_keypoints))
max_candidates = keypoint_candidates.shape[1]
# Replace all invalid (i.e. padded) keypoint candidates with NaN.
# This will prevent them from being considered.
range_tiled = tf.tile(
tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]),
[batch_size, 1, num_keypoints])
num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1),
[1, max_candidates, 1])
invalid_candidates = range_tiled >= num_candidates_tiled
# Pairwise squared distances between regressed keypoints and candidate
# keypoints (for a single keypoint type).
# Shape [batch_size, num_instances, 1, num_keypoints, 2].
regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints,
axis=2)
# Shape [batch_size, 1, max_candidates, num_keypoints, 2].
keypoint_candidates_expanded = tf.expand_dims(
keypoint_candidates, axis=1)
# Use explicit tensor shape broadcasting (since the tensor dimensions are
# expanded to 5D) to make it tf.lite compatible.
regressed_keypoint_expanded = tf.tile(
regressed_keypoint_expanded, multiples=[1, 1, max_candidates, 1, 1])
keypoint_candidates_expanded = tf.tile(
keypoint_candidates_expanded, multiples=[1, num_instances, 1, 1, 1])
  # Replace tf.math.squared_difference with the "-" operator and tf.multiply
  # ops since the TF Lite converter doesn't support squared_difference with an
  # undetermined dimension.
diff = regressed_keypoint_expanded - keypoint_candidates_expanded
sqrd_distances = tf.math.reduce_sum(tf.multiply(diff, diff), axis=-1)
distances = tf.math.sqrt(sqrd_distances)
  # Replace the invalid candidates with a large constant (10^5) to make sure
  # the following reduce_min/argmin behaves properly.
max_dist = 1e5
distances = tf.where(
tf.tile(
tf.expand_dims(invalid_candidates, axis=1),
multiples=[1, num_instances, 1, 1]),
tf.ones_like(distances) * max_dist,
distances
)
# Determine the candidates that have the minimum distance to the regressed
# keypoints. Shape [batch_size, num_instances, num_keypoints].
min_distances = tf.math.reduce_min(distances, axis=2)
if candidate_ranking_mode == 'min_distance':
nearby_candidate_inds = tf.math.argmin(distances, axis=2)
elif candidate_ranking_mode == 'score_distance_ratio':
# tiled_keypoint_scores:
# Shape [batch_size, num_instances, max_candidates, num_keypoints].
tiled_keypoint_scores = tf.tile(
tf.expand_dims(keypoint_scores, axis=1),
multiples=[1, num_instances, 1, 1])
ranking_scores = tiled_keypoint_scores / (distances + score_distance_offset)
nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)
elif candidate_ranking_mode == 'score_scaled_distance_ratio':
ranking_scores = sdr_scaled_ranking_score(
keypoint_scores, distances, bboxes, score_distance_multiplier)
nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)
elif candidate_ranking_mode == 'gaussian_weighted':
ranking_scores = gaussian_weighted_score(
keypoint_scores, distances, keypoint_std_dev, bboxes)
nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)
weighted_scores = tf.math.reduce_max(ranking_scores, axis=2)
else:
raise ValueError('Not recognized candidate_ranking_mode: %s' %
candidate_ranking_mode)
# Gather the coordinates and scores corresponding to the closest candidates.
# Shape of tensors are [batch_size, num_instances, num_keypoints, 2] and
# [batch_size, num_instances, num_keypoints], respectively.
(nearby_candidate_coords, nearby_candidate_scores,
nearby_candidate_depths) = (
_gather_candidates_at_indices(keypoint_candidates, keypoint_scores,
nearby_candidate_inds,
keypoint_depth_candidates))
# If the ranking mode is 'gaussian_weighted', we use the ranking scores as the
# final keypoint confidence since their values are in between [0, 1].
if candidate_ranking_mode == 'gaussian_weighted':
nearby_candidate_scores = weighted_scores
if bboxes is None:
    # Filter out chosen candidates whose scores fall below the keypoint score
    # threshold.
mask = tf.cast(nearby_candidate_scores <
keypoint_score_threshold, tf.int32)
else:
bboxes_flattened = tf.reshape(bboxes, [-1, 4])
# Scale the bounding boxes.
# Shape [batch_size, num_instances, 4].
boxlist = box_list.BoxList(bboxes_flattened)
boxlist_scaled = box_list_ops.scale_height_width(
boxlist, box_scale, box_scale)
bboxes_scaled = boxlist_scaled.get()
bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4])
# Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint.
# Shape [batch_size, num_instances, num_keypoints].
bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1])
ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3)
# Produce a mask that indicates whether the original regressed keypoint
# should be used instead of a candidate keypoint.
# Shape [batch_size, num_instances, num_keypoints].
search_radius = (
tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale)
mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) +
            # Filter out chosen candidates whose scores fall below the
            # keypoint score threshold.
tf.cast(nearby_candidate_scores <
keypoint_score_threshold, tf.int32) +
tf.cast(min_distances > search_radius, tf.int32))
mask = mask > 0
# Create refined keypoints where candidate keypoints replace original
# regressed keypoints if they are in the vicinity of the regressed keypoints.
# Shape [batch_size, num_instances, num_keypoints, 2].
refined_keypoints = tf.where(
tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]),
regressed_keypoints,
nearby_candidate_coords)
# Update keypoints scores. In the case where we use the original regressed
# keypoints, we use a default score of `unmatched_keypoint_score`.
# Shape [batch_size, num_instances, num_keypoints].
refined_scores = tf.where(
mask,
unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores),
nearby_candidate_scores)
refined_depths = None
if nearby_candidate_depths is not None:
refined_depths = tf.where(mask, tf.zeros_like(nearby_candidate_depths),
nearby_candidate_depths)
return refined_keypoints, refined_scores, refined_depths
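# Usage sketch (illustrative shapes only; all inputs are hypothetical):
# snapping regressed keypoints to heatmap candidates with the default
# 'min_distance' ranking:
#   regressed: [batch, num_instances, num_keypoints, 2]
#   candidates: [batch, max_candidates, num_keypoints, 2]
#   scores: [batch, max_candidates, num_keypoints]
#   num_cands: [batch, num_keypoints]
#   refined_kpts, refined_scores, _ = refine_keypoints(
#       regressed, candidates, scores, num_cands, bboxes=boxes,
#       candidate_ranking_mode='min_distance')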
def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds,
num_total_keypoints):
"""Scatter keypoint elements into tensors with full keypoints dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
keypoint_inds: a list of integers that indicate the keypoint indices for
this specific keypoint class. These indices are used to scatter into
tensors that have a `num_total_keypoints` dimension.
num_total_keypoints: The total number of keypoints that this model predicts.
Returns:
A tuple with
keypoint_coords_padded: a
      [batch_size, num_instances, num_total_keypoints, 2] float32 tensor.
keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints]
float32 tensor.
"""
batch_size, num_instances, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1])
kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_coords_transposed,
shape=[num_total_keypoints, batch_size, num_instances, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_scores_transposed,
shape=[num_total_keypoints, batch_size, num_instances])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0])
return keypoint_coords_padded, keypoint_scores_padded
def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds,
max_instances):
"""Scatter keypoint elements into tensors with full instance dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
instance_inds: a list of integers that indicate the instance indices for
these keypoints. These indices are used to scatter into tensors
that have a `max_instances` dimension.
max_instances: The maximum number of instances detected by the model.
Returns:
A tuple with
keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2]
float32 tensor.
keypoint_scores_padded: a [batch_size, max_instances, num_keypoints]
float32 tensor.
"""
batch_size, _, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2])
instance_inds = tf.expand_dims(instance_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_coords_transposed,
shape=[max_instances, batch_size, num_keypoints, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_scores_transposed,
shape=[max_instances, batch_size, num_keypoints])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2])
return keypoint_coords_padded, keypoint_scores_padded
def _gather_candidates_at_indices(keypoint_candidates,
keypoint_scores,
indices,
keypoint_depth_candidates=None):
"""Gathers keypoint candidate coordinates and scores at indices.
Args:
keypoint_candidates: a float tensor of shape [batch_size, max_candidates,
num_keypoints, 2] with candidate coordinates.
keypoint_scores: a float tensor of shape [batch_size, max_candidates,
num_keypoints] with keypoint scores.
indices: an integer tensor of shape [batch_size, num_indices, num_keypoints]
with indices.
keypoint_depth_candidates: (optional) a float tensor of shape [batch_size,
max_candidates, num_keypoints] with keypoint depths.
Returns:
A tuple with
gathered_keypoint_candidates: a float tensor of shape [batch_size,
num_indices, num_keypoints, 2] with gathered coordinates.
gathered_keypoint_scores: a float tensor of shape [batch_size,
num_indices, num_keypoints].
gathered_keypoint_depths: a float tensor of shape [batch_size,
        num_indices, num_keypoints]. This is None if the input
        keypoint_depth_candidates is None.
"""
batch_size, num_indices, num_keypoints = _get_shape(indices, 3)
# Transpose tensors so that all batch dimensions are up front.
keypoint_candidates_transposed = tf.transpose(keypoint_candidates,
[0, 2, 1, 3])
keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1])
nearby_candidate_inds_transposed = tf.transpose(indices, [0, 2, 1])
# TF Lite does not support tf.gather with batch_dims > 0, so we need to use
# tf_gather_nd instead and here we prepare the indices for that.
combined_indices = tf.stack([
_multi_range(
batch_size,
value_repetitions=num_keypoints * num_indices,
dtype=tf.int64),
_multi_range(
num_keypoints,
value_repetitions=num_indices,
range_repetitions=batch_size,
dtype=tf.int64),
tf.reshape(nearby_candidate_inds_transposed, [-1])
], axis=1)
nearby_candidate_coords_transposed = tf.gather_nd(
keypoint_candidates_transposed, combined_indices)
nearby_candidate_coords_transposed = tf.reshape(
nearby_candidate_coords_transposed,
[batch_size, num_keypoints, num_indices, -1])
nearby_candidate_scores_transposed = tf.gather_nd(keypoint_scores_transposed,
combined_indices)
nearby_candidate_scores_transposed = tf.reshape(
nearby_candidate_scores_transposed,
[batch_size, num_keypoints, num_indices])
gathered_keypoint_candidates = tf.transpose(
nearby_candidate_coords_transposed, [0, 2, 1, 3])
# The reshape operation above may result in a singleton last dimension, but
# downstream code requires it to always be at least 2-valued.
original_shape = tf.shape(gathered_keypoint_candidates)
new_shape = tf.concat((original_shape[:3],
[tf.maximum(original_shape[3], 2)]), 0)
gathered_keypoint_candidates = tf.reshape(gathered_keypoint_candidates,
new_shape)
gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed,
[0, 2, 1])
gathered_keypoint_depths = None
if keypoint_depth_candidates is not None:
keypoint_depths_transposed = tf.transpose(keypoint_depth_candidates,
[0, 2, 1])
nearby_candidate_depths_transposed = tf.gather_nd(
keypoint_depths_transposed, combined_indices)
nearby_candidate_depths_transposed = tf.reshape(
nearby_candidate_depths_transposed,
[batch_size, num_keypoints, num_indices])
gathered_keypoint_depths = tf.transpose(nearby_candidate_depths_transposed,
[0, 2, 1])
return (gathered_keypoint_candidates, gathered_keypoint_scores,
gathered_keypoint_depths)
def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):
"""Get the index in a flattened array given row and column indices."""
return (row_indices * num_cols) + col_indices
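# For example (hypothetical values), row 2 and column 3 in an image with 5
# columns map to flattened index 2 * 5 + 3 = 13:
#   flattened_indices_from_row_col_indices(
#       tf.constant(2), tf.constant(3), num_cols=5)  # -> 13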
def row_col_channel_indices_from_flattened_indices(indices, num_cols,
num_channels):
"""Computes row, column and channel indices from flattened indices.
Args:
indices: An integer tensor of any shape holding the indices in the flattened
space.
num_cols: Number of columns in the image (width).
num_channels: Number of channels in the image.
Returns:
row_indices: The row indices corresponding to each of the input indices.
Same shape as indices.
col_indices: The column indices corresponding to each of the input indices.
Same shape as indices.
    channel_indices: The channel indices corresponding to each of the input
      indices. Same shape as indices.
"""
# Be careful with this function when running a model in float16 precision
# (e.g. TF.js with WebGL) because the array indices may not be represented
# accurately if they are too large, resulting in incorrect channel indices.
# See:
# https://en.wikipedia.org/wiki/Half-precision_floating-point_format#Precision_limitations_on_integer_values
#
  # Avoid using the mod operator so that the ops remain compatible with a
  # wider range of environments, e.g. WASM.
row_indices = (indices // num_channels) // num_cols
col_indices = (indices // num_channels) - row_indices * num_cols
channel_indices_temp = indices // num_channels
channel_indices = indices - channel_indices_temp * num_channels
return row_indices, col_indices, channel_indices
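# For example (hypothetical values), with 5 columns and 3 channels, flattened
# index 23 decodes to spatial index 23 // 3 = 7, i.e. row 1 and column 2, and
# channel 23 - 7 * 3 = 2:
#   row_col_channel_indices_from_flattened_indices(
#       tf.constant(23), num_cols=5, num_channels=3)  # -> (1, 2, 2)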
def row_col_indices_from_flattened_indices(indices, num_cols):
"""Computes row and column indices from flattened indices.
Args:
indices: An integer tensor of any shape holding the indices in the flattened
space.
num_cols: Number of columns in the image (width).
Returns:
row_indices: The row indices corresponding to each of the input indices.
Same shape as indices.
col_indices: The column indices corresponding to each of the input indices.
Same shape as indices.
"""
  # Avoid using the mod operator so that the ops remain compatible with a
  # wider range of environments, e.g. WASM.
row_indices = indices // num_cols
col_indices = indices - row_indices * num_cols
return row_indices, col_indices
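# For example (hypothetical values), with 5 columns, flattened index 13
# decodes to row 13 // 5 = 2 and column 13 - 2 * 5 = 3:
#   row_col_indices_from_flattened_indices(
#       tf.constant(13), num_cols=5)  # -> (2, 3)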
def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height,
width):
"""Computes valid anchor weights for an image assuming pixels will be flattened.
This function is useful when we only want to penalize valid areas in the
image in the case when padding is used. The function assumes that the loss
function will be applied after flattening the spatial dimensions and returns
anchor weights accordingly.
Args:
true_image_shapes: An integer tensor of shape [batch_size, 3] representing
the true image shape (without padding) for each sample in the batch.
height: height of the prediction from the network.
width: width of the prediction from the network.
Returns:
valid_anchor_weights: a float tensor of shape [batch_size, height * width]
with 1s in locations where the spatial coordinates fall within the height
and width in true_image_shapes.
"""
indices = tf.reshape(tf.range(height * width), [1, -1])
batch_size = tf.shape(true_image_shapes)[0]
batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices
y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices(
batch_indices, width, 1)
max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1]
max_x = _to_float32(tf.expand_dims(max_x, 1))
max_y = _to_float32(tf.expand_dims(max_y, 1))
x_coords = _to_float32(x_coords)
y_coords = _to_float32(y_coords)
valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y)
return _to_float32(valid_mask)
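# Illustrative sketch (hypothetical shapes): for a 4x4 prediction whose true
# (unpadded) image content occupies the top-left 2x3 region, only the first
# two rows and first three columns receive weight 1.0:
#   weights = get_valid_anchor_weights_in_flattened_image(
#       tf.constant([[2, 3, 3]]), height=4, width=4)  # shape [1, 16]
#   # tf.reshape(weights, [4, 4]) is 1.0 at rows 0-1, cols 0-2, else 0.0.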
def convert_strided_predictions_to_normalized_boxes(boxes, stride,
true_image_shapes):
"""Converts predictions in the output space to normalized boxes.
Boxes falling outside the valid image boundary are clipped to be on the
boundary.
Args:
boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw
coordinates of boxes in the model's output space.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
Returns:
boxes: A tensor of shape [batch_size, num_boxes, 4] representing the
coordinates of the normalized boxes.
"""
# Note: We use tf ops instead of functions in box_list_ops to make this
# function compatible with dynamic batch size.
boxes = boxes * stride
true_image_shapes = tf.tile(true_image_shapes[:, tf.newaxis, :2], [1, 1, 2])
boxes = boxes / tf.cast(true_image_shapes, tf.float32)
boxes = tf.clip_by_value(boxes, 0.0, 1.0)
return boxes
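# Illustrative numbers (hypothetical): with stride 4, a box
# [1.0, 1.0, 2.0, 3.0] in the output space covers pixels [4, 4, 8, 12]; for a
# true image of height 16 and width 24 this normalizes to approximately
# [0.25, 0.167, 0.5, 0.5]:
#   convert_strided_predictions_to_normalized_boxes(
#       tf.constant([[[1.0, 1.0, 2.0, 3.0]]]), stride=4,
#       true_image_shapes=tf.constant([[16, 24, 3]]))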
def convert_strided_predictions_to_normalized_keypoints(
keypoint_coords, keypoint_scores, stride, true_image_shapes,
clip_out_of_frame_keypoints=False):
"""Converts predictions in the output space to normalized keypoints.
If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside
the valid image boundary are normalized but not clipped; If
clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the
valid image boundary are clipped to the closest image boundary and the scores
will be set to 0.0.
Args:
keypoint_coords: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] holding the raw coordinates
of keypoints in the model's output space.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] holding the keypoint scores.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside
the image boundary should be clipped. If True, keypoint coords will be
clipped to image boundary. If False, keypoints are normalized but not
filtered based on their location.
Returns:
keypoint_coords_normalized: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] representing the coordinates
of the normalized keypoints.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] representing the updated
keypoint scores.
"""
  # Get the batch size from the shape of the keypoint coordinates.
batch_size, _, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
# Scale and normalize keypoints.
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
yscale = float(stride) / tf.cast(true_heights, tf.float32)
xscale = float(stride) / tf.cast(true_widths, tf.float32)
yx_scale = tf.stack([yscale, xscale], axis=1)
keypoint_coords_normalized = keypoint_coords * tf.reshape(
yx_scale, [batch_size, 1, 1, 2])
if clip_out_of_frame_keypoints:
# Determine the keypoints that are in the true image regions.
valid_indices = tf.logical_and(
tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0,
keypoint_coords_normalized[:, :, :, 0] <= 1.0),
tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0,
keypoint_coords_normalized[:, :, :, 1] <= 1.0))
batch_window = tf.tile(
tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32),
multiples=[batch_size, 1])
def clip_to_window(inputs):
keypoints, window = inputs
return keypoint_ops.clip_to_window(keypoints, window)
keypoint_coords_normalized = shape_utils.static_or_dynamic_map_fn(
clip_to_window, [keypoint_coords_normalized, batch_window],
dtype=tf.float32, back_prop=False)
keypoint_scores = tf.where(valid_indices, keypoint_scores,
tf.zeros_like(keypoint_scores))
return keypoint_coords_normalized, keypoint_scores
def convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap=None, densepose_surface_coords=None, stride=4,
mask_height=256, mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Converts predicted full-image masks into instance masks.
For each predicted detection box:
* Crop and resize the predicted mask (and optionally DensePose coordinates)
based on the detected bounding box coordinates and class prediction. Uses
bilinear resampling.
* Binarize the mask using the provided score threshold.
Args:
boxes: A tensor of shape [batch, max_detections, 4] holding the predicted
boxes, in normalized coordinates (relative to the true image dimensions).
classes: An integer tensor of shape [batch, max_detections] containing the
detected class for each box (0-indexed).
masks: A [batch, output_height, output_width, num_classes] float32
tensor with class probabilities.
true_image_shapes: A tensor of shape [batch, 3] representing the true
shape of the inputs not considering padding.
densepose_part_heatmap: (Optional) A [batch, output_height, output_width,
num_parts] float32 tensor with part scores (i.e. logits).
densepose_surface_coords: (Optional) A [batch, output_height, output_width,
2 * num_parts] float32 tensor with predicted part coordinates (in
vu-format).
stride: The stride in the output space.
mask_height: The desired resized height for instance masks.
mask_width: The desired resized width for instance masks.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: The class index (0-indexed) corresponding to the
class which has DensePose labels (e.g. person class).
Returns:
A tuple of masks and surface_coords.
instance_masks: A [batch_size, max_detections, mask_height, mask_width]
uint8 tensor with predicted foreground mask for each
instance. If DensePose tensors are provided, then each pixel value in the
mask encodes the 1-indexed part.
surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates. Note that v, u coordinates are
only defined on instance masks, and the coordinates at each location of
the foreground mask correspond to coordinates on a local part coordinate
system (the specific part can be inferred from the `instance_masks`
      output). If DensePose feature maps are not passed to this function, this
output will be None.
Raises:
ValueError: If one but not both of `densepose_part_heatmap` and
`densepose_surface_coords` is provided.
"""
batch_size, output_height, output_width, _ = (
shape_utils.combined_static_and_dynamic_shape(masks))
input_height = stride * output_height
input_width = stride * output_width
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
# If necessary, create dummy DensePose tensors to simplify the map function.
densepose_present = True
if ((densepose_part_heatmap is not None) ^
(densepose_surface_coords is not None)):
raise ValueError('To use DensePose, both `densepose_part_heatmap` and '
'`densepose_surface_coords` must be provided')
if densepose_part_heatmap is None and densepose_surface_coords is None:
densepose_present = False
densepose_part_heatmap = tf.zeros(
(batch_size, output_height, output_width, 1), dtype=tf.float32)
densepose_surface_coords = tf.zeros(
(batch_size, output_height, output_width, 2), dtype=tf.float32)
crop_and_threshold_fn = functools.partial(
crop_and_threshold_masks, input_height=input_height,
input_width=input_width, mask_height=mask_height, mask_width=mask_width,
score_threshold=score_threshold,
densepose_class_index=densepose_class_index)
instance_masks, surface_coords = shape_utils.static_or_dynamic_map_fn(
crop_and_threshold_fn,
elems=[boxes, classes, masks, densepose_part_heatmap,
densepose_surface_coords, true_heights, true_widths],
dtype=[tf.uint8, tf.float32],
back_prop=False)
surface_coords = surface_coords if densepose_present else None
return instance_masks, surface_coords
def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256,
mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Crops and thresholds masks based on detection boxes.
Args:
elems: A tuple of
boxes - float32 tensor of shape [max_detections, 4]
classes - int32 tensor of shape [max_detections] (0-indexed)
masks - float32 tensor of shape [output_height, output_width, num_classes]
part_heatmap - float32 tensor of shape [output_height, output_width,
num_parts]
surf_coords - float32 tensor of shape [output_height, output_width,
2 * num_parts]
true_height - scalar int tensor
true_width - scalar int tensor
input_height: Input height to network.
input_width: Input width to network.
mask_height: Height for resizing mask crops.
mask_width: Width for resizing mask crops.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: scalar int tensor with the class index (0-indexed)
for DensePose.
Returns:
A tuple of
all_instances: A [max_detections, mask_height, mask_width] uint8 tensor
with a predicted foreground mask for each instance. Background is encoded
as 0, and foreground is encoded as a positive integer. Specific part
indices are encoded as 1-indexed parts (for classes that have part
information).
surface_coords: A [max_detections, mask_height, mask_width, 2]
      float32 tensor with (v, u) coordinates for each part.
"""
(boxes, classes, masks, part_heatmap, surf_coords, true_height,
true_width) = elems
# Boxes are in normalized coordinates relative to true image shapes. Convert
# coordinates to be normalized relative to input image shapes (since masks
# may still have padding).
boxlist = box_list.BoxList(boxes)
y_scale = true_height / input_height
x_scale = true_width / input_width
boxlist = box_list_ops.scale(boxlist, y_scale, x_scale)
boxes = boxlist.get()
# Convert masks from [output_height, output_width, num_classes] to
# [num_classes, output_height, output_width, 1].
num_classes = tf.shape(masks)[-1]
masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis]
# Tile part and surface coordinate masks for all classes.
part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d],
axis=-1)
# The following tensor has shape
# [max_detections, mask_height, mask_width, 1 + 3 * num_parts].
cropped_masks = tf2.image.crop_and_resize(
feature_maps_concat,
boxes=boxes,
box_indices=classes,
crop_size=[mask_height, mask_width],
method='bilinear')
# Split the cropped masks back into instance masks, part masks, and surface
# coordinates.
num_parts = tf.shape(part_heatmap)[-1]
instance_masks, part_heatmap_cropped, surface_coords_cropped = tf.split(
cropped_masks, [1, num_parts, 2 * num_parts], axis=-1)
# Threshold the instance masks. Resulting tensor has shape
# [max_detections, mask_height, mask_width, 1].
instance_masks_int = tf.cast(
tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32)
# Produce a binary mask that is 1.0 only:
# - in the foreground region for an instance
# - in detections corresponding to the DensePose class
det_with_parts = tf.equal(classes, densepose_class_index)
det_with_parts = tf.cast(
tf.reshape(det_with_parts, [-1, 1, 1, 1]), dtype=tf.int32)
instance_masks_with_parts = tf.math.multiply(instance_masks_int,
det_with_parts)
# Similarly, produce a binary mask that holds the foreground masks only for
# instances without parts (i.e. non-DensePose classes).
det_without_parts = 1 - det_with_parts
instance_masks_without_parts = tf.math.multiply(instance_masks_int,
det_without_parts)
# Assemble a tensor that has standard instance segmentation masks for
# non-DensePose classes (with values in [0, 1]), and part segmentation masks
  # for DensePose classes (with values in [0, 1, ..., num_parts]).
part_mask_int_zero_indexed = tf.math.argmax(
part_heatmap_cropped, axis=-1, output_type=tf.int32)[:, :, :, tf.newaxis]
part_mask_int_one_indexed = part_mask_int_zero_indexed + 1
all_instances = (instance_masks_without_parts +
instance_masks_with_parts * part_mask_int_one_indexed)
# Gather the surface coordinates for the parts.
surface_coords_cropped = tf.reshape(
surface_coords_cropped, [-1, mask_height, mask_width, num_parts, 2])
surface_coords = gather_surface_coords_for_parts(surface_coords_cropped,
part_mask_int_zero_indexed)
surface_coords = (
surface_coords * tf.cast(instance_masks_with_parts, tf.float32))
return [tf.squeeze(all_instances, axis=3), surface_coords]
def gather_surface_coords_for_parts(surface_coords_cropped,
highest_scoring_part):
"""Gathers the (v, u) coordinates for the highest scoring DensePose parts.
Args:
surface_coords_cropped: A [max_detections, height, width, num_parts, 2]
float32 tensor with (v, u) surface coordinates.
highest_scoring_part: A [max_detections, height, width] integer tensor with
the highest scoring part (0-indexed) indices for each location.
Returns:
A [max_detections, height, width, 2] float32 tensor with the (v, u)
coordinates selected from the highest scoring parts.
"""
max_detections, height, width, num_parts, _ = (
shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped))
flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2])
flattened_part_ids = tf.reshape(highest_scoring_part, [-1])
# Produce lookup indices that represent the locations of the highest scoring
# parts in the `flattened_surface_coords` tensor.
flattened_lookup_indices = (
num_parts * tf.range(max_detections * height * width) +
flattened_part_ids)
vu_coords_flattened = tf.gather(flattened_surface_coords,
flattened_lookup_indices, axis=0)
return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2])
def predicted_embeddings_at_object_centers(embedding_predictions,
y_indices, x_indices):
"""Returns the predicted embeddings at specified object centers.
Args:
embedding_predictions: A float tensor of shape [batch_size, height, width,
reid_embed_size] holding predicted embeddings.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, reid_embed_size] where
predicted embeddings are gathered at the provided locations.
"""
batch_size, _, width, _ = _get_shape(embedding_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
_, num_instances = _get_shape(flattened_indices, 2)
embeddings_flat = _flatten_spatial_dimensions(embedding_predictions)
embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1)
embeddings = tf.reshape(embeddings, [batch_size, num_instances, -1])
return embeddings
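# Illustrative sketch (not part of the original library code): gathering
# per-object ReID embeddings from a toy feature map with
# `predicted_embeddings_at_object_centers`. Wrapped in a helper so the module
# has no import-time side effects.
def _example_predicted_embeddings_at_object_centers():
  # Batch of 2 feature maps of spatial size 4x4 with 8-d embeddings.
  embeddings = tf.reshape(
      tf.range(2 * 4 * 4 * 8, dtype=tf.float32), [2, 4, 4, 8])
  # Two object centers per image, given as (y, x) indices in the feature map.
  y_indices = tf.constant([[0, 2], [1, 3]], dtype=tf.int32)
  x_indices = tf.constant([[1, 3], [0, 2]], dtype=tf.int32)
  # Result has shape [2, 2, 8]: one 8-d embedding per object center.
  return predicted_embeddings_at_object_centers(embeddings, y_indices,
                                                x_indices)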
def mask_from_true_image_shape(data_shape, true_image_shapes):
"""Get a binary mask based on the true_image_shape.
Args:
data_shape: a possibly static (4,) tensor for the shape of the feature
map.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true
images in the resized images, as resized images can be padded with
zeros.
Returns:
    a [batch, data_height, data_width, 1] tensor that is 1.0 at spatial
    locations falling inside the true image height and width, and 0.0 in the
    padded region.
"""
mask_h = tf.cast(
tf.range(data_shape[1]) < true_image_shapes[:, tf.newaxis, 0],
tf.float32)
mask_w = tf.cast(
tf.range(data_shape[2]) < true_image_shapes[:, tf.newaxis, 1],
tf.float32)
mask = tf.expand_dims(
mask_h[:, :, tf.newaxis] * mask_w[:, tf.newaxis, :], 3)
return mask
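# Illustrative sketch (not part of the original library code): building the
# padding mask for a batch where the second image only occupies the top-left
# 2x3 region of the padded feature map.
def _example_mask_from_true_image_shape():
  feature_map = tf.zeros([2, 4, 6, 16])
  true_image_shapes = tf.constant([[4, 6, 3], [2, 3, 3]], dtype=tf.int32)
  # mask has shape [2, 4, 6, 1]; mask[0] is all ones, while mask[1] is 1.0
  # only where (row < 2) and (col < 3).
  return mask_from_true_image_shape(tf.shape(feature_map), true_image_shapes)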
class ObjectDetectionParams(
collections.namedtuple('ObjectDetectionParams', [
'localization_loss', 'scale_loss_weight', 'offset_loss_weight',
'task_loss_weight', 'scale_head_num_filters',
'scale_head_kernel_sizes', 'offset_head_num_filters',
'offset_head_kernel_sizes'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the object detection task. The class is
  immutable after construction. Please see the __new__ function for detailed
  information on each field.
"""
__slots__ = ()
def __new__(cls,
localization_loss,
scale_loss_weight,
offset_loss_weight,
task_loss_weight=1.0,
scale_head_num_filters=(256),
scale_head_kernel_sizes=(3),
offset_head_num_filters=(256),
offset_head_kernel_sizes=(3)):
"""Constructor with default values for ObjectDetectionParams.
Args:
localization_loss: a object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
scale_loss_weight: float, The weight for localizing box size. Note that
the scale loss is dependent on the input image size, since we penalize
the raw height and width. This constant may need to be adjusted
depending on the input size.
offset_loss_weight: float, The weight for localizing center offsets.
task_loss_weight: float, the weight of the object detection loss.
scale_head_num_filters: filter numbers of the convolutional layers used
by the object detection box scale prediction head.
scale_head_kernel_sizes: kernel size of the convolutional layers used
by the object detection box scale prediction head.
offset_head_num_filters: filter numbers of the convolutional layers used
by the object detection box offset prediction head.
offset_head_kernel_sizes: kernel size of the convolutional layers used
by the object detection box offset prediction head.
Returns:
An initialized ObjectDetectionParams namedtuple.
"""
return super(ObjectDetectionParams,
cls).__new__(cls, localization_loss, scale_loss_weight,
offset_loss_weight, task_loss_weight,
scale_head_num_filters, scale_head_kernel_sizes,
offset_head_num_filters, offset_head_kernel_sizes)
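# Illustrative sketch (not part of the original library code): constructing
# ObjectDetectionParams roughly the way the CenterNet model builder does. The
# loss class is assumed to be available in object_detection.core.losses; any
# core.losses.Loss instance can be passed instead.
def _example_object_detection_params():
  from object_detection.core import losses as losses_lib
  return ObjectDetectionParams(
      localization_loss=losses_lib.L1LocalizationLoss(),
      scale_loss_weight=0.1,
      offset_loss_weight=1.0,
      task_loss_weight=1.0)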
class KeypointEstimationParams(
collections.namedtuple('KeypointEstimationParams', [
'task_name', 'class_id', 'keypoint_indices', 'classification_loss',
'localization_loss', 'keypoint_labels', 'keypoint_std_dev',
'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight',
'keypoint_regression_loss_weight', 'keypoint_candidate_score_threshold',
'heatmap_bias_init', 'num_candidates_per_keypoint', 'task_loss_weight',
'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale',
'candidate_search_scale', 'candidate_ranking_mode',
'offset_peak_radius', 'per_keypoint_offset', 'predict_depth',
'per_keypoint_depth', 'keypoint_depth_loss_weight',
'score_distance_offset', 'clip_out_of_frame_keypoints',
'rescore_instances', 'heatmap_head_num_filters',
'heatmap_head_kernel_sizes', 'offset_head_num_filters',
'offset_head_kernel_sizes', 'regress_head_num_filters',
'regress_head_kernel_sizes', 'score_distance_multiplier',
'std_dev_multiplier', 'rescoring_threshold', 'gaussian_denom_ratio',
'argmax_postprocessing'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the keypoint estimation task. The class is
immutable after constructed. Please see the __new__ function for detailed
information for each fields.
"""
__slots__ = ()
def __new__(cls,
task_name,
class_id,
keypoint_indices,
classification_loss,
localization_loss,
keypoint_labels=None,
keypoint_std_dev=None,
keypoint_heatmap_loss_weight=1.0,
keypoint_offset_loss_weight=1.0,
keypoint_regression_loss_weight=1.0,
keypoint_candidate_score_threshold=0.1,
heatmap_bias_init=-2.19,
num_candidates_per_keypoint=100,
task_loss_weight=1.0,
peak_max_pool_kernel_size=3,
unmatched_keypoint_score=0.1,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance',
offset_peak_radius=0,
per_keypoint_offset=False,
predict_depth=False,
per_keypoint_depth=False,
keypoint_depth_loss_weight=1.0,
score_distance_offset=1e-6,
clip_out_of_frame_keypoints=False,
rescore_instances=False,
heatmap_head_num_filters=(256),
heatmap_head_kernel_sizes=(3),
offset_head_num_filters=(256),
offset_head_kernel_sizes=(3),
regress_head_num_filters=(256),
regress_head_kernel_sizes=(3),
score_distance_multiplier=0.1,
std_dev_multiplier=1.0,
rescoring_threshold=0.0,
argmax_postprocessing=False,
gaussian_denom_ratio=0.1):
"""Constructor with default values for KeypointEstimationParams.
Args:
task_name: string, the name of the task this namedtuple corresponds to.
        Note that it should be a unique identifier of the task.
class_id: int, the ID of the class that contains the target keypoints to
        be considered in this task. For example, if the task is human pose
estimation, the class id should correspond to the "human" class. Note
that the ID is 0-based, meaning that class 0 corresponds to the first
non-background object class.
      keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
keypoint_labels: A list of strings representing the label text of each
keypoint, e.g. "nose", 'left_shoulder". Note that the length of this
list should be equal to keypoint_indices.
keypoint_std_dev: A list of float represent the standard deviation of the
Gaussian kernel used to generate the keypoint heatmap. It is to provide
the flexibility of using different sizes of Gaussian kernel for each
keypoint class.
keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.
keypoint_offset_loss_weight: float, The weight for the keypoint offsets
loss.
keypoint_regression_loss_weight: float, The weight for keypoint regression
loss. Note that the loss is dependent on the input image size, since we
penalize the raw height and width. This constant may need to be adjusted
depending on the input size.
keypoint_candidate_score_threshold: float, The heatmap score threshold for
a keypoint to become a valid candidate.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the class prediction head. If set to None, the bias is
initialized with zeros.
num_candidates_per_keypoint: The maximum number of candidates to retrieve
for each keypoint.
task_loss_weight: float, the weight of the keypoint estimation loss.
peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak
score locations in a neighborhood (independently for each keypoint
        type).
unmatched_keypoint_score: The default score to use for regressed keypoints
that are not successfully snapped to a nearby candidate.
box_scale: The multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints).
candidate_search_scale: The scale parameter that multiplies the largest
dimension of a bounding box. The resulting distance becomes a search
radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio',
'score_scaled_distance_ratio', 'gaussian_weighted'] indicating how to
select the keypoint candidate.
offset_peak_radius: The radius (in the unit of output pixel) around
        groundtruth heatmap peak to assign the offset targets. If set to 0,
the offset target will only be assigned to the heatmap peak (same
behavior as the original paper).
      per_keypoint_offset: A bool indicating whether to assign offsets for
        each keypoint channel separately. If set to False, the output offset
        target has the shape [batch_size, out_height, out_width, 2] (same
        behavior as the original paper). If set to True, the output offset
        target has the shape [batch_size, out_height, out_width,
        2 * num_keypoints] (recommended when the offset_peak_radius is not
        zero).
      predict_depth: A bool indicating whether to predict the depth of each
        keypoint.
      per_keypoint_depth: A bool indicating whether the model predicts the
        depth of each keypoint in independent channels. Similar to
        per_keypoint_offset but for the keypoint depth.
keypoint_depth_loss_weight: The weight of the keypoint depth loss.
score_distance_offset: The distance offset to apply in the denominator
when candidate_ranking_mode is 'score_distance_ratio'. The metric to
maximize in this scenario is score / (distance + score_distance_offset).
Larger values of score_distance_offset make the keypoint score gain more
relative importance.
clip_out_of_frame_keypoints: Whether keypoints outside the image frame
should be clipped back to the image boundary. If True, the keypoints
that are clipped have scores set to 0.0.
rescore_instances: Whether to rescore instances based on a combination of
detection score and keypoint scores.
heatmap_head_num_filters: filter numbers of the convolutional layers used
by the keypoint heatmap prediction head.
heatmap_head_kernel_sizes: kernel size of the convolutional layers used
by the keypoint heatmap prediction head.
offset_head_num_filters: filter numbers of the convolutional layers used
by the keypoint offset prediction head.
offset_head_kernel_sizes: kernel size of the convolutional layers used
by the keypoint offset prediction head.
regress_head_num_filters: filter numbers of the convolutional layers used
by the keypoint regression prediction head.
regress_head_kernel_sizes: kernel size of the convolutional layers used
by the keypoint regression prediction head.
score_distance_multiplier: A scalar used to multiply the bounding box size
to be used as the offset in the score-to-distance-ratio formula.
std_dev_multiplier: A scalar used to multiply the standard deviation to
        control the Gaussian kernel which is used to weight the candidates.
rescoring_threshold: A scalar used when "rescore_instances" is set to
        True. The detection score of an instance is set to the average of the
        keypoint scores that are higher than the threshold.
argmax_postprocessing: Whether to use the keypoint postprocessing logic
that replaces the topk op with argmax. Usually used when exporting the
model for predicting keypoints of multiple instances in the browser.
gaussian_denom_ratio: The ratio used to multiply the image size to
determine the denominator of the Gaussian formula. Only applicable when
the candidate_ranking_mode is set to be 'gaussian_weighted_const'.
Returns:
An initialized KeypointEstimationParams namedtuple.
"""
return super(KeypointEstimationParams, cls).__new__(
cls, task_name, class_id, keypoint_indices, classification_loss,
localization_loss, keypoint_labels, keypoint_std_dev,
keypoint_heatmap_loss_weight, keypoint_offset_loss_weight,
keypoint_regression_loss_weight, keypoint_candidate_score_threshold,
heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight,
peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale,
candidate_search_scale, candidate_ranking_mode, offset_peak_radius,
per_keypoint_offset, predict_depth, per_keypoint_depth,
keypoint_depth_loss_weight, score_distance_offset,
clip_out_of_frame_keypoints, rescore_instances,
heatmap_head_num_filters, heatmap_head_kernel_sizes,
offset_head_num_filters, offset_head_kernel_sizes,
regress_head_num_filters, regress_head_kernel_sizes,
score_distance_multiplier, std_dev_multiplier, rescoring_threshold,
argmax_postprocessing, gaussian_denom_ratio)
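# Illustrative sketch (not part of the original library code): a minimal human
# pose task configuration with 17 keypoints (e.g. the COCO keypoints). The
# loss classes are assumed to be available in object_detection.core.losses, as
# used by the CenterNet configs; the keypoint labels below are placeholders.
def _example_keypoint_estimation_params():
  from object_detection.core import losses as losses_lib
  num_keypoints = 17
  return KeypointEstimationParams(
      task_name='human_pose',
      class_id=0,  # 0-based id of the "person" class.
      keypoint_indices=list(range(num_keypoints)),
      classification_loss=losses_lib.PenaltyReducedLogisticFocalLoss(
          alpha=2.0, beta=4.0),
      localization_loss=losses_lib.L1LocalizationLoss(),
      keypoint_labels=['kpt_%d' % i for i in range(num_keypoints)],
      keypoint_std_dev=[1.0] * num_keypoints,
      candidate_ranking_mode='min_distance')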
class ObjectCenterParams(
collections.namedtuple('ObjectCenterParams', [
'classification_loss', 'object_center_loss_weight', 'heatmap_bias_init',
'min_box_overlap_iou', 'max_box_predictions', 'use_labeled_classes',
'keypoint_weights_for_center', 'center_head_num_filters',
'center_head_kernel_sizes', 'peak_max_pool_kernel_size'
])):
"""Namedtuple to store object center prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
object_center_loss_weight,
heatmap_bias_init=-2.19,
min_box_overlap_iou=0.7,
max_box_predictions=100,
use_labeled_classes=False,
keypoint_weights_for_center=None,
center_head_num_filters=(256),
center_head_kernel_sizes=(3),
peak_max_pool_kernel_size=3):
"""Constructor with default values for ObjectCenterParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
object_center_loss_weight: float, The weight for the object center loss.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the object center prediction head. If set to None, the bias is
initialized with zeros.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
        need to have with groundtruth boxes to not be penalized. This is used
        for computing the class specific center heatmaps.
max_box_predictions: int, the maximum number of boxes to predict.
      use_labeled_classes: boolean, compute the loss only for labeled classes.
keypoint_weights_for_center: (optional) The keypoint weights used for
calculating the location of object center. If provided, the number of
        weights needs to be the same as the number of keypoints. The object
center is calculated by the weighted mean of the keypoint locations. If
not provided, the object center is determined by the center of the
bounding box (default behavior).
center_head_num_filters: filter numbers of the convolutional layers used
by the object center prediction head.
center_head_kernel_sizes: kernel size of the convolutional layers used
by the object center prediction head.
peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak
score locations in a neighborhood for the object detection heatmap.
Returns:
An initialized ObjectCenterParams namedtuple.
"""
return super(ObjectCenterParams,
cls).__new__(cls, classification_loss,
object_center_loss_weight, heatmap_bias_init,
min_box_overlap_iou, max_box_predictions,
use_labeled_classes, keypoint_weights_for_center,
center_head_num_filters, center_head_kernel_sizes,
peak_max_pool_kernel_size)
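# Illustrative sketch (not part of the original library code): object center
# parameters similar to the defaults commonly used for CenterNet detection.
# The focal loss class is assumed to live in object_detection.core.losses.
def _example_object_center_params():
  from object_detection.core import losses as losses_lib
  return ObjectCenterParams(
      classification_loss=losses_lib.PenaltyReducedLogisticFocalLoss(
          alpha=2.0, beta=4.0),
      object_center_loss_weight=1.0,
      min_box_overlap_iou=0.7,
      max_box_predictions=100)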
class MaskParams(
collections.namedtuple('MaskParams', [
'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width',
'score_threshold', 'heatmap_bias_init', 'mask_head_num_filters',
'mask_head_kernel_sizes'
])):
"""Namedtuple to store mask prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
task_loss_weight=1.0,
mask_height=256,
mask_width=256,
score_threshold=0.5,
heatmap_bias_init=-2.19,
mask_head_num_filters=(256),
mask_head_kernel_sizes=(3)):
"""Constructor with default values for MaskParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the semantic segmentation predictions in CenterNet.
task_loss_weight: float, The loss weight for the segmentation task.
mask_height: The height of the resized instance segmentation mask.
mask_width: The width of the resized instance segmentation mask.
score_threshold: The threshold at which to convert predicted mask
probabilities (after passing through sigmoid) into foreground pixels.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the semantic segmentation prediction head. If set to None, the
bias is initialized with zeros.
mask_head_num_filters: filter numbers of the convolutional layers used
by the mask prediction head.
mask_head_kernel_sizes: kernel size of the convolutional layers used
by the mask prediction head.
Returns:
An initialized MaskParams namedtuple.
"""
return super(MaskParams,
cls).__new__(cls, classification_loss,
task_loss_weight, mask_height, mask_width,
score_threshold, heatmap_bias_init,
mask_head_num_filters, mask_head_kernel_sizes)
class DensePoseParams(
collections.namedtuple('DensePoseParams', [
'class_id', 'classification_loss', 'localization_loss',
'part_loss_weight', 'coordinate_loss_weight', 'num_parts',
'task_loss_weight', 'upsample_to_input_res', 'upsample_method',
'heatmap_bias_init'
])):
"""Namedtuple to store DensePose prediction related parameters."""
__slots__ = ()
def __new__(cls,
class_id,
classification_loss,
localization_loss,
part_loss_weight=1.0,
coordinate_loss_weight=1.0,
num_parts=24,
task_loss_weight=1.0,
upsample_to_input_res=True,
upsample_method='bilinear',
heatmap_bias_init=-2.19):
"""Constructor with default values for DensePoseParams.
Args:
class_id: the ID of the class that contains the DensePose groundtruth.
This should typically correspond to the "person" class. Note that the ID
is 0-based, meaning that class 0 corresponds to the first non-background
object class.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the body part predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the surface coordinate regression in CenterNet.
part_loss_weight: The loss weight to apply to part prediction.
coordinate_loss_weight: The loss weight to apply to surface coordinate
prediction.
num_parts: The number of DensePose parts to predict.
task_loss_weight: float, the loss weight for the DensePose task.
upsample_to_input_res: Whether to upsample the DensePose feature maps to
the input resolution before applying loss. Note that the prediction
outputs are still at the standard CenterNet output stride.
upsample_method: Method for upsampling DensePose feature maps. Options are
        either 'bilinear' or 'nearest'. This has no effect when
`upsample_to_input_res` is False.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the part prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized DensePoseParams namedtuple.
"""
return super(DensePoseParams,
cls).__new__(cls, class_id, classification_loss,
localization_loss, part_loss_weight,
coordinate_loss_weight, num_parts,
task_loss_weight, upsample_to_input_res,
upsample_method, heatmap_bias_init)
class TrackParams(
collections.namedtuple('TrackParams', [
'num_track_ids', 'reid_embed_size', 'num_fc_layers',
'classification_loss', 'task_loss_weight'
])):
"""Namedtuple to store tracking prediction related parameters."""
__slots__ = ()
def __new__(cls,
num_track_ids,
reid_embed_size,
num_fc_layers,
classification_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
num_track_ids: int. The maximum track ID in the dataset. Used for ReID
embedding classification task.
reid_embed_size: int. The embedding size for ReID task.
num_fc_layers: int. The number of (fully-connected, batch-norm, relu)
layers for track ID classification head.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the ReID embedding in CenterNet.
task_loss_weight: float, the loss weight for the tracking task.
Returns:
An initialized TrackParams namedtuple.
"""
return super(TrackParams,
cls).__new__(cls, num_track_ids, reid_embed_size,
num_fc_layers, classification_loss,
task_loss_weight)
class TemporalOffsetParams(
collections.namedtuple('TemporalOffsetParams', [
'localization_loss', 'task_loss_weight'
])):
"""Namedtuple to store temporal offset related parameters."""
__slots__ = ()
def __new__(cls,
localization_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
localization_loss: an object_detection.core.losses.Loss object to
compute the loss for the temporal offset in CenterNet.
task_loss_weight: float, the loss weight for the temporal offset
task.
Returns:
An initialized TemporalOffsetParams namedtuple.
"""
return super(TemporalOffsetParams,
cls).__new__(cls, localization_loss, task_loss_weight)
# The following constants are used to generate the keys of the
# (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch
# class.
DETECTION_TASK = 'detection_task'
OBJECT_CENTER = 'object_center'
BOX_SCALE = 'box/scale'
BOX_OFFSET = 'box/offset'
KEYPOINT_REGRESSION = 'keypoint/regression'
KEYPOINT_HEATMAP = 'keypoint/heatmap'
KEYPOINT_OFFSET = 'keypoint/offset'
KEYPOINT_DEPTH = 'keypoint/depth'
SEGMENTATION_TASK = 'segmentation_task'
SEGMENTATION_HEATMAP = 'segmentation/heatmap'
DENSEPOSE_TASK = 'densepose_task'
DENSEPOSE_HEATMAP = 'densepose/heatmap'
DENSEPOSE_REGRESSION = 'densepose/regression'
LOSS_KEY_PREFIX = 'Loss'
TRACK_TASK = 'track_task'
TRACK_REID = 'track/reid'
TEMPORALOFFSET_TASK = 'temporal_offset_task'
TEMPORAL_OFFSET = 'track/offset'
def get_keypoint_name(task_name, head_name):
return '%s/%s' % (task_name, head_name)
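# Illustrative sketch (not part of the original library code): prediction-head
# and loss keys are namespaced by task name, so a task called 'human_pose'
# produces keys such as 'human_pose/keypoint/heatmap'. The task name here is
# just an example.
def _example_get_keypoint_name():
  return get_keypoint_name('human_pose', KEYPOINT_HEATMAP)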
def get_num_instances_from_weights(groundtruth_weights_list):
"""Computes the number of instances/boxes from the weights in a batch.
Args:
groundtruth_weights_list: A list of float tensors with shape
[max_num_instances] representing whether there is an actual instance in
the image (with non-zero value) or is padded to match the
max_num_instances (with value 0.0). The list represents the batch
dimension.
Returns:
    A scalar integer tensor indicating how many instances/boxes are in the
    images in the batch. Note that this function is usually used to normalize
    the loss, so the minimum return value is 1 to avoid degenerate behavior
    such as division by zero.
"""
num_instances = tf.reduce_sum(
[tf.math.count_nonzero(w) for w in groundtruth_weights_list])
num_instances = tf.maximum(num_instances, 1)
return num_instances
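# Illustrative sketch (not part of the original library code): counting the
# real (non-padded) boxes in a batch of two images whose groundtruth weights
# were padded to max_num_instances=4.
def _example_get_num_instances_from_weights():
  weights = [
      tf.constant([1.0, 1.0, 0.0, 0.0]),  # 2 real boxes in image 0.
      tf.constant([1.0, 0.0, 0.0, 0.0]),  # 1 real box in image 1.
  ]
  # Returns a scalar tensor equal to 3 (and never less than 1).
  return get_num_instances_from_weights(weights)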
class CenterNetMetaArch(model.DetectionModel):
"""The CenterNet meta architecture [1].
[1]: https://arxiv.org/abs/1904.07850
"""
def __init__(self,
is_training,
add_summaries,
num_classes,
feature_extractor,
image_resizer_fn,
object_center_params,
object_detection_params=None,
keypoint_params_dict=None,
mask_params=None,
densepose_params=None,
track_params=None,
temporal_offset_params=None,
use_depthwise=False,
compute_heatmap_sparse=False,
non_max_suppression_fn=None,
unit_height_conv=False,
output_prediction_dict=False):
"""Initializes a CenterNet model.
Args:
is_training: Set to True if this model is being built for training.
add_summaries: Whether to add tf summaries in the model.
num_classes: int, The number of classes that the model should predict.
feature_extractor: A CenterNetFeatureExtractor to use to extract features
from an image.
image_resizer_fn: a callable for image resizing. This callable always
takes a rank-3 image tensor (corresponding to a single image) and
        returns a rank-3 image tensor, possibly with new spatial dimensions,
        and a 1-D tensor of shape [3] indicating the shape of the true image
        within the resized image tensor, as the resized image tensor could be
        padded. See
builders/image_resizer_builder.py.
object_center_params: An ObjectCenterParams namedtuple. This object holds
the hyper-parameters for object center prediction. This is required by
either object detection or keypoint estimation tasks.
object_detection_params: An ObjectDetectionParams namedtuple. This object
holds the hyper-parameters necessary for object detection. Please see
the class definition for more details.
keypoint_params_dict: A dictionary that maps from task name to the
corresponding KeypointEstimationParams namedtuple. This object holds the
hyper-parameters necessary for multiple keypoint estimations. Please
see the class definition for more details.
mask_params: A MaskParams namedtuple. This object
holds the hyper-parameters for segmentation. Please see the class
definition for more details.
densepose_params: A DensePoseParams namedtuple. This object holds the
hyper-parameters for DensePose prediction. Please see the class
definition for more details. Note that if this is provided, it is
expected that `mask_params` is also provided.
track_params: A TrackParams namedtuple. This object
holds the hyper-parameters for tracking. Please see the class
definition for more details.
temporal_offset_params: A TemporalOffsetParams namedtuple. This object
holds the hyper-parameters for offset prediction based tracking.
use_depthwise: If true, all task heads will be constructed using
        separable_conv. Otherwise, standard convolutions will be used.
compute_heatmap_sparse: bool, whether or not to use the sparse version of
the Op that computes the center heatmaps. The sparse version scales
better with number of channels in the heatmap, but in some cases is
known to cause an OOM error. See b/170989061.
non_max_suppression_fn: Optional Non Max Suppression function to apply.
unit_height_conv: If True, Conv2Ds in prediction heads have asymmetric
kernels with height=1.
output_prediction_dict: If true, combines all items from the dictionary
        returned by the predict() function into the output of postprocess().
"""
assert object_detection_params or keypoint_params_dict
# Shorten the name for convenience and better formatting.
self._is_training = is_training
# The Objects as Points paper attaches loss functions to multiple
    # (`num_feature_outputs`) feature maps in the backbone. E.g.
# for the hourglass backbone, `num_feature_outputs` is 2.
self._num_classes = num_classes
self._feature_extractor = feature_extractor
self._num_feature_outputs = feature_extractor.num_feature_outputs
self._stride = self._feature_extractor.out_stride
self._image_resizer_fn = image_resizer_fn
self._center_params = object_center_params
self._od_params = object_detection_params
self._kp_params_dict = keypoint_params_dict
self._mask_params = mask_params
if densepose_params is not None and mask_params is None:
raise ValueError('To run DensePose prediction, `mask_params` must also '
'be supplied.')
self._densepose_params = densepose_params
self._track_params = track_params
self._temporal_offset_params = temporal_offset_params
self._use_depthwise = use_depthwise
self._compute_heatmap_sparse = compute_heatmap_sparse
self._output_prediction_dict = output_prediction_dict
# subclasses may not implement the unit_height_conv arg, so only provide it
# as a kwarg if it is True.
kwargs = {'unit_height_conv': unit_height_conv} if unit_height_conv else {}
# Construct the prediction head nets.
self._prediction_head_dict = self._construct_prediction_heads(
num_classes,
self._num_feature_outputs,
class_prediction_bias_init=self._center_params.heatmap_bias_init,
**kwargs)
# Initialize the target assigners.
self._target_assigner_dict = self._initialize_target_assigners(
stride=self._stride,
min_box_overlap_iou=self._center_params.min_box_overlap_iou)
# Will be used in VOD single_frame_meta_arch for tensor reshape.
self._batched_prediction_tensor_names = []
self._non_max_suppression_fn = non_max_suppression_fn
super(CenterNetMetaArch, self).__init__(num_classes)
def set_trainability_by_layer_traversal(self, trainable):
"""Sets trainability layer by layer.
    The commonly-seen `model.trainable = False` approach does not traverse the
    children layers. For example, if the parent is not trainable, we won't be
    able to set individual layers as trainable/non-trainable differentially.
Args:
      trainable: (bool) The trainability value to set. It is applied to the
        model layer by layer, excluding the parent itself.
"""
for layer in self._flatten_layers(include_self=False):
layer.trainable = trainable
@property
def prediction_head_dict(self):
return self._prediction_head_dict
@property
def batched_prediction_tensor_names(self):
if not self._batched_prediction_tensor_names:
raise RuntimeError('Must call predict() method to get batched prediction '
'tensor names.')
return self._batched_prediction_tensor_names
def _make_prediction_net_list(self, num_feature_outputs, num_out_channels,
kernel_sizes=(3), num_filters=(256),
bias_fill=None, name=None,
unit_height_conv=False):
prediction_net_list = []
for i in range(num_feature_outputs):
prediction_net_list.append(
make_prediction_net(
num_out_channels,
kernel_sizes=kernel_sizes,
num_filters=num_filters,
bias_fill=bias_fill,
use_depthwise=self._use_depthwise,
name='{}_{}'.format(name, i) if name else name,
unit_height_conv=unit_height_conv))
return prediction_net_list
def _construct_prediction_heads(self, num_classes, num_feature_outputs,
class_prediction_bias_init,
unit_height_conv=False):
"""Constructs the prediction heads based on the specific parameters.
Args:
num_classes: An integer indicating how many classes in total to predict.
num_feature_outputs: An integer indicating how many feature outputs to use
for calculating the loss. The Objects as Points paper attaches loss
        functions to multiple (`num_feature_outputs`) feature maps in the
backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2.
class_prediction_bias_init: float, the initial value of bias in the
convolutional kernel of the class prediction head. If set to None, the
bias is initialized with zeros.
unit_height_conv: If True, Conv2Ds have asymmetric kernels with height=1.
Returns:
A dictionary of keras modules generated by calling make_prediction_net
function. It will also create and set a private member of the class when
learning the tracking task.
"""
prediction_heads = {}
prediction_heads[OBJECT_CENTER] = self._make_prediction_net_list(
num_feature_outputs,
num_classes,
kernel_sizes=self._center_params.center_head_kernel_sizes,
num_filters=self._center_params.center_head_num_filters,
bias_fill=class_prediction_bias_init,
name='center',
unit_height_conv=unit_height_conv)
if self._od_params is not None:
prediction_heads[BOX_SCALE] = self._make_prediction_net_list(
num_feature_outputs,
NUM_SIZE_CHANNELS,
kernel_sizes=self._od_params.scale_head_kernel_sizes,
num_filters=self._od_params.scale_head_num_filters,
name='box_scale',
unit_height_conv=unit_height_conv)
prediction_heads[BOX_OFFSET] = self._make_prediction_net_list(
num_feature_outputs,
NUM_OFFSET_CHANNELS,
kernel_sizes=self._od_params.offset_head_kernel_sizes,
num_filters=self._od_params.offset_head_num_filters,
name='box_offset',
unit_height_conv=unit_height_conv)
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
num_keypoints = len(kp_params.keypoint_indices)
prediction_heads[get_keypoint_name(
task_name, KEYPOINT_HEATMAP)] = self._make_prediction_net_list(
num_feature_outputs,
num_keypoints,
kernel_sizes=kp_params.heatmap_head_kernel_sizes,
num_filters=kp_params.heatmap_head_num_filters,
bias_fill=kp_params.heatmap_bias_init,
name='kpt_heatmap',
unit_height_conv=unit_height_conv)
prediction_heads[get_keypoint_name(
task_name, KEYPOINT_REGRESSION)] = self._make_prediction_net_list(
num_feature_outputs,
NUM_OFFSET_CHANNELS * num_keypoints,
kernel_sizes=kp_params.regress_head_kernel_sizes,
num_filters=kp_params.regress_head_num_filters,
name='kpt_regress',
unit_height_conv=unit_height_conv)
if kp_params.per_keypoint_offset:
prediction_heads[get_keypoint_name(
task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list(
num_feature_outputs,
NUM_OFFSET_CHANNELS * num_keypoints,
kernel_sizes=kp_params.offset_head_kernel_sizes,
num_filters=kp_params.offset_head_num_filters,
name='kpt_offset',
unit_height_conv=unit_height_conv)
else:
prediction_heads[get_keypoint_name(
task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list(
num_feature_outputs,
NUM_OFFSET_CHANNELS,
kernel_sizes=kp_params.offset_head_kernel_sizes,
num_filters=kp_params.offset_head_num_filters,
name='kpt_offset',
unit_height_conv=unit_height_conv)
if kp_params.predict_depth:
num_depth_channel = (
num_keypoints if kp_params.per_keypoint_depth else 1)
prediction_heads[get_keypoint_name(
task_name, KEYPOINT_DEPTH)] = self._make_prediction_net_list(
num_feature_outputs, num_depth_channel, name='kpt_depth',
unit_height_conv=unit_height_conv)
if self._mask_params is not None:
prediction_heads[SEGMENTATION_HEATMAP] = self._make_prediction_net_list(
num_feature_outputs,
num_classes,
kernel_sizes=self._mask_params.mask_head_kernel_sizes,
num_filters=self._mask_params.mask_head_num_filters,
bias_fill=self._mask_params.heatmap_bias_init,
name='seg_heatmap',
unit_height_conv=unit_height_conv)
if self._densepose_params is not None:
prediction_heads[DENSEPOSE_HEATMAP] = self._make_prediction_net_list(
num_feature_outputs,
self._densepose_params.num_parts,
bias_fill=self._densepose_params.heatmap_bias_init,
name='dense_pose_heatmap',
unit_height_conv=unit_height_conv)
prediction_heads[DENSEPOSE_REGRESSION] = self._make_prediction_net_list(
num_feature_outputs,
2 * self._densepose_params.num_parts,
name='dense_pose_regress',
unit_height_conv=unit_height_conv)
if self._track_params is not None:
prediction_heads[TRACK_REID] = self._make_prediction_net_list(
num_feature_outputs,
self._track_params.reid_embed_size,
name='track_reid',
unit_height_conv=unit_height_conv)
# Creates a classification network to train object embeddings by learning
# a projection from embedding space to object track ID space.
self.track_reid_classification_net = tf.keras.Sequential()
for _ in range(self._track_params.num_fc_layers - 1):
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.reid_embed_size))
self.track_reid_classification_net.add(
tf.keras.layers.BatchNormalization())
self.track_reid_classification_net.add(tf.keras.layers.ReLU())
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.num_track_ids))
if self._temporal_offset_params is not None:
prediction_heads[TEMPORAL_OFFSET] = self._make_prediction_net_list(
num_feature_outputs, NUM_OFFSET_CHANNELS, name='temporal_offset',
unit_height_conv=unit_height_conv)
return prediction_heads
def _initialize_target_assigners(self, stride, min_box_overlap_iou):
"""Initializes the target assigners and puts them in a dictionary.
Args:
stride: An integer indicating the stride of the image.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
        need to have with groundtruth boxes to not be penalized. This is used
        for computing the class specific center heatmaps.
Returns:
A dictionary of initialized target assigners for each task.
"""
target_assigners = {}
keypoint_weights_for_center = (
self._center_params.keypoint_weights_for_center)
if not keypoint_weights_for_center:
target_assigners[OBJECT_CENTER] = (
cn_assigner.CenterNetCenterHeatmapTargetAssigner(
stride, min_box_overlap_iou, self._compute_heatmap_sparse))
self._center_from_keypoints = False
else:
# Determining the object center location by keypoint location is only
# supported when there is exactly one keypoint prediction task and no
# object detection task is specified.
assert len(self._kp_params_dict) == 1 and self._od_params is None
kp_params = next(iter(self._kp_params_dict.values()))
# The number of keypoint_weights_for_center needs to be the same as the
# number of keypoints.
assert len(keypoint_weights_for_center) == len(kp_params.keypoint_indices)
target_assigners[OBJECT_CENTER] = (
cn_assigner.CenterNetCenterHeatmapTargetAssigner(
stride,
min_box_overlap_iou,
self._compute_heatmap_sparse,
keypoint_class_id=kp_params.class_id,
keypoint_indices=kp_params.keypoint_indices,
keypoint_weights_for_center=keypoint_weights_for_center))
self._center_from_keypoints = True
if self._od_params is not None:
target_assigners[DETECTION_TASK] = (
cn_assigner.CenterNetBoxTargetAssigner(stride))
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
target_assigners[task_name] = (
cn_assigner.CenterNetKeypointTargetAssigner(
stride=stride,
class_id=kp_params.class_id,
keypoint_indices=kp_params.keypoint_indices,
keypoint_std_dev=kp_params.keypoint_std_dev,
peak_radius=kp_params.offset_peak_radius,
per_keypoint_offset=kp_params.per_keypoint_offset,
compute_heatmap_sparse=self._compute_heatmap_sparse,
per_keypoint_depth=kp_params.per_keypoint_depth))
if self._mask_params is not None:
target_assigners[SEGMENTATION_TASK] = (
cn_assigner.CenterNetMaskTargetAssigner(stride, boxes_scale=1.05))
if self._densepose_params is not None:
dp_stride = 1 if self._densepose_params.upsample_to_input_res else stride
target_assigners[DENSEPOSE_TASK] = (
cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride))
if self._track_params is not None:
target_assigners[TRACK_TASK] = (
cn_assigner.CenterNetTrackTargetAssigner(
stride, self._track_params.num_track_ids))
if self._temporal_offset_params is not None:
target_assigners[TEMPORALOFFSET_TASK] = (
cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride))
return target_assigners
def _compute_object_center_loss(self, input_height, input_width,
object_center_predictions, per_pixel_weights,
maximum_normalized_coordinate=1.1):
"""Computes the object center loss.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_center_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_classes] representing the object center
feature maps.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
maximum_normalized_coordinate: Maximum coordinate value to be considered
        as normalized, defaults to 1.1. This is used to check bounds when
        converting normalized coordinates to absolute coordinates.
Returns:
A float scalar tensor representing the object center loss per instance.
"""
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
if self._center_params.use_labeled_classes:
gt_labeled_classes_list = self.groundtruth_lists(
fields.InputDataFields.groundtruth_labeled_classes)
batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0)
batch_labeled_classes_shape = tf.shape(batch_labeled_classes)
batch_labeled_classes = tf.reshape(
batch_labeled_classes,
[batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]])
per_pixel_weights = per_pixel_weights * batch_labeled_classes
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[OBJECT_CENTER]
if self._center_from_keypoints:
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
heatmap_targets = assigner.assign_center_targets_from_keypoints(
height=input_height,
width=input_width,
gt_classes_list=gt_classes_list,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
maximum_normalized_coordinate=maximum_normalized_coordinate)
else:
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
heatmap_targets = assigner.assign_center_targets_from_boxes(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list,
maximum_normalized_coordinate=maximum_normalized_coordinate)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
loss = 0.0
object_center_loss = self._center_params.classification_loss
# Loop through each feature output head.
for pred in object_center_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += object_center_loss(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_center_predictions)) * num_boxes)
return loss_per_instance
def _compute_object_detection_losses(self, input_height, input_width,
prediction_dict, per_pixel_weights,
maximum_normalized_coordinate=1.1):
"""Computes the weighted object detection losses.
This wrapper function calls the function which computes the losses for
object detection task and applies corresponding weights to the losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by
"predict" function. See "predict" function for more detailed
description.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
maximum_normalized_coordinate: Maximum coordinate value to be considered
        as normalized, defaults to 1.1. This is used to check bounds when
        converting normalized coordinates to absolute coordinates.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
object detection task:
BOX_SCALE: the weighted scale (height/width) loss.
BOX_OFFSET: the weighted object offset loss.
"""
od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss(
scale_predictions=prediction_dict[BOX_SCALE],
offset_predictions=prediction_dict[BOX_OFFSET],
input_height=input_height,
input_width=input_width,
maximum_normalized_coordinate=maximum_normalized_coordinate)
loss_dict = {}
loss_dict[BOX_SCALE] = (
self._od_params.scale_loss_weight * od_scale_loss)
loss_dict[BOX_OFFSET] = (
self._od_params.offset_loss_weight * od_offset_loss)
return loss_dict
def _compute_box_scale_and_offset_loss(self, input_height, input_width,
scale_predictions, offset_predictions,
maximum_normalized_coordinate=1.1):
"""Computes the scale loss of the object detection task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
scale_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object scale (i.e height and width).
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object offset.
maximum_normalized_coordinate: Maximum coordinate value to be considered
        as normalized, defaults to 1.1. This is used to check bounds when
        converting normalized coordinates to absolute coordinates.
Returns:
A tuple of two losses:
scale_loss: A float scalar tensor representing the object height/width
loss normalized by total number of boxes.
offset_loss: A float scalar tensor representing the object offset loss
          normalized by total number of boxes.
"""
# TODO(vighneshb) Explore a size invariant version of scale loss.
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
num_predictions = float(len(scale_predictions))
assigner = self._target_assigner_dict[DETECTION_TASK]
(batch_indices, batch_height_width_targets, batch_offset_targets,
batch_weights) = assigner.assign_size_and_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list,
maximum_normalized_coordinate=maximum_normalized_coordinate)
batch_weights = tf.expand_dims(batch_weights, -1)
scale_loss = 0
offset_loss = 0
localization_loss_fn = self._od_params.localization_loss
for scale_pred, offset_pred in zip(scale_predictions, offset_predictions):
# Compute the scale loss.
scale_pred = cn_assigner.get_batch_predictions_from_indices(
scale_pred, batch_indices)
scale_loss += localization_loss_fn(
scale_pred, batch_height_width_targets, weights=batch_weights)
# Compute the offset loss.
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += localization_loss_fn(
offset_pred, batch_offset_targets, weights=batch_weights)
scale_loss = tf.reduce_sum(scale_loss) / (
num_predictions * num_boxes)
offset_loss = tf.reduce_sum(offset_loss) / (
num_predictions * num_boxes)
return scale_loss, offset_loss
def _compute_keypoint_estimation_losses(self, task_name, input_height,
input_width, prediction_dict,
per_pixel_weights):
"""Computes the weighted keypoint losses."""
kp_params = self._kp_params_dict[task_name]
heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)
offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET)
regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)
depth_key = get_keypoint_name(task_name, KEYPOINT_DEPTH)
heatmap_loss = self._compute_kp_heatmap_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
heatmap_predictions=prediction_dict[heatmap_key],
classification_loss_fn=kp_params.classification_loss,
per_pixel_weights=per_pixel_weights)
offset_loss = self._compute_kp_offset_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
offset_predictions=prediction_dict[offset_key],
localization_loss_fn=kp_params.localization_loss)
reg_loss = self._compute_kp_regression_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
regression_predictions=prediction_dict[regression_key],
localization_loss_fn=kp_params.localization_loss)
loss_dict = {}
loss_dict[heatmap_key] = (
kp_params.keypoint_heatmap_loss_weight * heatmap_loss)
loss_dict[offset_key] = (
kp_params.keypoint_offset_loss_weight * offset_loss)
loss_dict[regression_key] = (
kp_params.keypoint_regression_loss_weight * reg_loss)
if kp_params.predict_depth:
depth_loss = self._compute_kp_depth_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
depth_predictions=prediction_dict[depth_key],
localization_loss_fn=kp_params.localization_loss)
loss_dict[depth_key] = kp_params.keypoint_depth_loss_weight * depth_loss
return loss_dict
def _compute_kp_heatmap_loss(self, input_height, input_width, task_name,
heatmap_predictions, classification_loss_fn,
per_pixel_weights):
"""Computes the heatmap loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
heatmap_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_keypoints] representing the prediction heads
of the model for keypoint heatmap.
classification_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
loss: A float scalar tensor representing the object keypoint heatmap loss
normalized by number of instances.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
assigner = self._target_assigner_dict[task_name]
(keypoint_heatmap, num_instances_per_kp_type,
valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list)
flattened_valid_mask = _flatten_spatial_dimensions(valid_mask_batch)
    flattened_heatmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)
    # Sum over the number of instances per keypoint type to get the total
    # number of keypoints. Note that this is used to normalize the loss and we
    # keep the minimum value at 1 to avoid generating weird loss values when
    # no keypoint is in the image batch.
num_instances = tf.maximum(
tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32),
1.0)
loss = 0.0
# Loop through each feature output head.
for pred in heatmap_predictions:
pred = _flatten_spatial_dimensions(pred)
unweighted_loss = classification_loss_fn(
pred,
          flattened_heatmap_targets,
weights=tf.ones_like(per_pixel_weights))
# Apply the weights after the loss function to have full control over it.
loss += unweighted_loss * per_pixel_weights * flattened_valid_mask
loss = tf.reduce_sum(loss) / (
float(len(heatmap_predictions)) * num_instances)
return loss
def _compute_kp_offset_loss(self, input_height, input_width, task_name,
offset_predictions, localization_loss_fn):
"""Computes the offset loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for keypoint offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint offset predictions in CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint offset loss
normalized by number of total keypoints.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_offsets,
batch_weights) = assigner.assign_keypoints_offset_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list)
# Keypoint offset loss.
loss = 0.0
for prediction in offset_predictions:
batch_size, out_height, out_width, channels = _get_shape(prediction, 4)
if channels > 2:
prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
prediction = cn_assigner.get_batch_predictions_from_indices(
prediction, batch_indices)
# The dimensions passed are not as per the doc string but the loss
# still computes the correct value.
unweighted_loss = localization_loss_fn(
prediction,
batch_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(offset_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_kp_regression_loss(self, input_height, input_width, task_name,
regression_predictions, localization_loss_fn):
"""Computes the keypoint regression loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
regression_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_keypoints] representing the prediction
heads of the model for keypoint regression offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint regression offset predictions in
CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint regression offset
loss normalized by number of total keypoints.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
# keypoint regression offset loss.
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_regression_offsets,
batch_weights) = assigner.assign_joint_regression_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list,
gt_boxes_list=gt_boxes_list)
loss = 0.0
for prediction in regression_predictions:
batch_size, out_height, out_width, _ = _get_shape(prediction, 4)
reshaped_prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
reg_prediction = cn_assigner.get_batch_predictions_from_indices(
reshaped_prediction, batch_indices)
unweighted_loss = localization_loss_fn(
reg_prediction,
batch_regression_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(regression_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_kp_depth_loss(self, input_height, input_width, task_name,
depth_predictions, localization_loss_fn):
"""Computes the loss of the keypoint depth estimation.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
depth_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 1 (or num_keypoints)] representing the prediction
heads of the model for keypoint depth.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint offset predictions in CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint depth loss
normalized by number of total keypoints.
"""
kp_params = self._kp_params_dict[task_name]
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
gt_keypoint_depths_list = self.groundtruth_lists(
fields.BoxListFields.keypoint_depths)
gt_keypoint_depth_weights_list = self.groundtruth_lists(
fields.BoxListFields.keypoint_depth_weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_depths,
batch_weights) = assigner.assign_keypoints_depth_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list,
gt_keypoint_depths_list=gt_keypoint_depths_list,
gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list)
    # Keypoint depth loss.
loss = 0.0
for prediction in depth_predictions:
if kp_params.per_keypoint_depth:
prediction = tf.expand_dims(prediction, axis=-1)
selected_depths = cn_assigner.get_batch_predictions_from_indices(
prediction, batch_indices)
# The dimensions passed are not as per the doc string but the loss
# still computes the correct value.
unweighted_loss = localization_loss_fn(
selected_depths,
batch_depths,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.squeeze(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(depth_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights):
"""Computes all the losses associated with segmentation.
Args:
prediction_dict: The dictionary returned from the predict() method.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary with segmentation losses.
"""
segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP]
mask_loss = self._compute_mask_loss(
segmentation_heatmap, per_pixel_weights)
losses = {
SEGMENTATION_HEATMAP: mask_loss
}
return losses
def _compute_mask_loss(self, segmentation_predictions,
per_pixel_weights):
"""Computes the mask loss.
Args:
segmentation_predictions: A list of float32 tensors of shape [batch_size,
out_height, out_width, num_classes].
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the mask loss.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks)
gt_mask_weights_list = None
if self.groundtruth_has_field(fields.BoxListFields.mask_weights):
gt_mask_weights_list = self.groundtruth_lists(
fields.BoxListFields.mask_weights)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[SEGMENTATION_TASK]
heatmap_targets, heatmap_weight = assigner.assign_segmentation_targets(
gt_masks_list=gt_masks_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list,
gt_mask_weights_list=gt_mask_weights_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
flattened_heatmap_mask = _flatten_spatial_dimensions(
heatmap_weight[:, :, :, tf.newaxis])
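    # Combine the valid-image-region weights with the per-pixel weights
    # produced by the segmentation target assigner so that padded regions and
    # down-weighted groundtruth masks do not contribute to the loss.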
per_pixel_weights *= flattened_heatmap_mask
loss = 0.0
mask_loss_fn = self._mask_params.classification_loss
total_pixels_in_loss = tf.math.maximum(
tf.reduce_sum(per_pixel_weights), 1)
# Loop through each feature output head.
for pred in segmentation_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += mask_loss_fn(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
# TODO(ronnyvotel): Consider other ways to normalize loss.
total_loss = tf.reduce_sum(loss) / (
float(len(segmentation_predictions)) * total_pixels_in_loss)
return total_loss
def _compute_densepose_losses(self, input_height, input_width,
prediction_dict):
"""Computes the weighted DensePose losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by the
"predict" function. See the "predict" function for more detailed
description.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
the DensePose task:
DENSEPOSE_HEATMAP: the weighted part segmentation loss.
DENSEPOSE_REGRESSION: the weighted part surface coordinate loss.
"""
dp_heatmap_loss, dp_regression_loss = (
self._compute_densepose_part_and_coordinate_losses(
input_height=input_height,
input_width=input_width,
part_predictions=prediction_dict[DENSEPOSE_HEATMAP],
surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION]))
loss_dict = {}
loss_dict[DENSEPOSE_HEATMAP] = (
self._densepose_params.part_loss_weight * dp_heatmap_loss)
loss_dict[DENSEPOSE_REGRESSION] = (
self._densepose_params.coordinate_loss_weight * dp_regression_loss)
return loss_dict
def _compute_densepose_part_and_coordinate_losses(
self, input_height, input_width, part_predictions,
surface_coord_predictions):
"""Computes the individual losses for the DensePose task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
part_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_parts].
surface_coord_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_parts].
Returns:
A tuple with two scalar loss tensors: part_prediction_loss and
surface_coord_loss.
"""
gt_dp_num_points_list = self.groundtruth_lists(
fields.BoxListFields.densepose_num_points)
gt_dp_part_ids_list = self.groundtruth_lists(
fields.BoxListFields.densepose_part_ids)
gt_dp_surface_coords_list = self.groundtruth_lists(
fields.BoxListFields.densepose_surface_coords)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[DENSEPOSE_TASK]
batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (
assigner.assign_part_and_coordinate_targets(
height=input_height,
width=input_width,
gt_dp_num_points_list=gt_dp_num_points_list,
gt_dp_part_ids_list=gt_dp_part_ids_list,
gt_dp_surface_coords_list=gt_dp_surface_coords_list,
gt_weights_list=gt_weights_list))
part_prediction_loss = 0
surface_coord_loss = 0
classification_loss_fn = self._densepose_params.classification_loss
localization_loss_fn = self._densepose_params.localization_loss
num_predictions = float(len(part_predictions))
num_valid_points = tf.math.count_nonzero(batch_weights)
num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32)
for part_pred, surface_coord_pred in zip(part_predictions,
surface_coord_predictions):
# Potentially upsample the feature maps, so that better quality (i.e.
# higher res) groundtruth can be applied.
if self._densepose_params.upsample_to_input_res:
part_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
part_pred)
surface_coord_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
surface_coord_pred)
# Compute the part prediction loss.
part_pred = cn_assigner.get_batch_predictions_from_indices(
part_pred, batch_indices[:, 0:3])
part_prediction_loss += classification_loss_fn(
part_pred[:, tf.newaxis, :],
batch_part_ids[:, tf.newaxis, :],
weights=batch_weights[:, tf.newaxis, tf.newaxis])
# Compute the surface coordinate loss.
batch_size, out_height, out_width, _ = _get_shape(
surface_coord_pred, 4)
surface_coord_pred = tf.reshape(
surface_coord_pred, [batch_size, out_height, out_width, -1, 2])
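      # After the reshape above, the (v, u) surface-coordinate pair predicted
      # for each part lives on its own axis, so batch_indices (which include
      # the part id) can gather the pair for the groundtruth part.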
surface_coord_pred = cn_assigner.get_batch_predictions_from_indices(
surface_coord_pred, batch_indices)
surface_coord_loss += localization_loss_fn(
surface_coord_pred,
batch_surface_coords,
weights=batch_weights[:, tf.newaxis])
part_prediction_loss = tf.reduce_sum(part_prediction_loss) / (
num_predictions * num_valid_points)
surface_coord_loss = tf.reduce_sum(surface_coord_loss) / (
num_predictions * num_valid_points)
return part_prediction_loss, surface_coord_loss
def _compute_track_losses(self, input_height, input_width, prediction_dict):
"""Computes all the losses associated with tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with tracking losses.
"""
object_reid_predictions = prediction_dict[TRACK_REID]
embedding_loss = self._compute_track_embedding_loss(
input_height=input_height,
input_width=input_width,
object_reid_predictions=object_reid_predictions)
losses = {
TRACK_REID: embedding_loss
}
return losses
def _compute_track_embedding_loss(self, input_height, input_width,
object_reid_predictions):
"""Computes the object ReID loss.
The embedding is trained as a classification task where the target is the
ID of each track among all tracks in the whole dataset.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_reid_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, reid_embed_size] representing the object
embedding feature maps.
Returns:
A float scalar tensor representing the object ReID loss per instance.
"""
gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[TRACK_TASK]
batch_indices, batch_weights, track_targets = assigner.assign_track_targets(
height=input_height,
width=input_width,
gt_track_ids_list=gt_track_ids_list,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
loss = 0.0
object_reid_loss = self._track_params.classification_loss
# Loop through each feature output head.
for pred in object_reid_predictions:
embedding_pred = cn_assigner.get_batch_predictions_from_indices(
pred, batch_indices)
reid_classification = self.track_reid_classification_net(embedding_pred)
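      # The ReID embedding is supervised as a classification problem over all
      # track ids in the dataset; only the embedding itself (not this
      # classification head) is used at inference time.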
loss += object_reid_loss(
reid_classification, track_targets, weights=batch_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_reid_predictions)) * num_boxes)
return loss_per_instance
def _compute_temporal_offset_loss(self, input_height,
input_width, prediction_dict):
"""Computes the temporal offset loss for tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with track/temporal_offset losses.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_offsets_list = self.groundtruth_lists(
fields.BoxListFields.temporal_offsets)
gt_match_list = self.groundtruth_lists(
fields.BoxListFields.track_match_flags)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = tf.cast(
get_num_instances_from_weights(gt_weights_list), tf.float32)
offset_predictions = prediction_dict[TEMPORAL_OFFSET]
num_predictions = float(len(offset_predictions))
assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK]
(batch_indices, batch_offset_targets,
batch_weights) = assigner.assign_temporal_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_offsets_list=gt_offsets_list,
gt_match_list=gt_match_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
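    # The temporal offset head regresses the motion of each object center
    # relative to the previous frame; batch_weights (derived from the
    # groundtruth match flags) zero out objects without a matched track.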
offset_loss_fn = self._temporal_offset_params.localization_loss
loss_dict = {}
offset_loss = 0
for offset_pred in offset_predictions:
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += offset_loss_fn(offset_pred[:, None],
batch_offset_targets[:, None],
weights=batch_weights)
offset_loss = tf.reduce_sum(offset_loss) / (num_predictions * num_boxes)
loss_dict[TEMPORAL_OFFSET] = offset_loss
return loss_dict
def _should_clip_keypoints(self):
"""Returns a boolean indicating whether keypoint clipping should occur.
If there is only one keypoint task, clipping is controlled by the field
`clip_out_of_frame_keypoints`. If there are multiple keypoint tasks,
clipping logic is defined based on unanimous agreement of keypoint
parameters. If there is any ambiguity, clip_out_of_frame_keypoints is set
to False (default).
"""
kp_params_iterator = iter(self._kp_params_dict.values())
if len(self._kp_params_dict) == 1:
kp_params = next(kp_params_iterator)
return kp_params.clip_out_of_frame_keypoints
# Multi-task setting.
kp_params = next(kp_params_iterator)
should_clip = kp_params.clip_out_of_frame_keypoints
for kp_params in kp_params_iterator:
if kp_params.clip_out_of_frame_keypoints != should_clip:
return False
return should_clip
def _rescore_instances(self, classes, scores, keypoint_scores):
"""Rescores instances based on detection and keypoint scores.
Args:
classes: A [batch, max_detections] int32 tensor with detection classes.
scores: A [batch, max_detections] float32 tensor with detection scores.
keypoint_scores: A [batch, max_detections, total_num_keypoints] float32
tensor with keypoint scores.
Returns:
A [batch, max_detections] float32 tensor with possibly altered detection
scores.
"""
batch, max_detections, total_num_keypoints = (
shape_utils.combined_static_and_dynamic_shape(keypoint_scores))
classes_tiled = tf.tile(classes[:, :, tf.newaxis],
multiples=[1, 1, total_num_keypoints])
# TODO(yuhuic): Investigate whether this function will create subgraphs in
# tflite that will cause the model to run slower at inference.
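    # For every keypoint class with rescore_instances enabled, the detection
    # score is replaced by detection_score * mean(keypoint scores above
    # rescoring_threshold) over that class's keypoints.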
for kp_params in self._kp_params_dict.values():
if not kp_params.rescore_instances:
continue
class_id = kp_params.class_id
keypoint_indices = kp_params.keypoint_indices
kpt_mask = tf.reduce_sum(
tf.one_hot(keypoint_indices, depth=total_num_keypoints), axis=0)
kpt_mask_tiled = tf.tile(kpt_mask[tf.newaxis, tf.newaxis, :],
multiples=[batch, max_detections, 1])
class_and_keypoint_mask = tf.math.logical_and(
classes_tiled == class_id,
kpt_mask_tiled == 1.0)
class_and_keypoint_mask_float = tf.cast(class_and_keypoint_mask,
dtype=tf.float32)
visible_keypoints = tf.math.greater(
keypoint_scores, kp_params.rescoring_threshold)
keypoint_scores = tf.where(
visible_keypoints, keypoint_scores, tf.zeros_like(keypoint_scores))
num_visible_keypoints = tf.reduce_sum(
class_and_keypoint_mask_float *
tf.cast(visible_keypoints, tf.float32), axis=-1)
num_visible_keypoints = tf.math.maximum(num_visible_keypoints, 1.0)
scores_for_class = (1./num_visible_keypoints) * (
tf.reduce_sum(class_and_keypoint_mask_float *
scores[:, :, tf.newaxis] *
keypoint_scores, axis=-1))
scores = tf.where(classes == class_id,
scores_for_class,
scores)
return scores
def preprocess(self, inputs):
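    # Resize (and possibly pad) the raw images, then apply the feature
    # extractor's own normalization. The true (unpadded) shapes are returned
    # so padded regions can be masked out in the loss and postprocessing.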
outputs = shape_utils.resize_images_and_return_shapes(
inputs, self._image_resizer_fn)
resized_inputs, true_image_shapes = outputs
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
def predict(self, preprocessed_inputs, _):
"""Predicts CenterNet prediction tensors given an input batch.
Feature extractors are free to produce predictions from multiple feature
maps and therefore we return a dictionary mapping strings to lists.
E.g. the hourglass backbone produces two feature maps.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
Returns:
prediction_dict: a dictionary holding predicted tensors with
'preprocessed_inputs' - The input image after being resized and
preprocessed by the feature extractor.
'extracted_features' - The output of the feature extractor.
'object_center' - A list of size num_feature_outputs containing
float tensors of size [batch_size, output_height, output_width,
num_classes] representing the predicted object center heatmap logits.
'box/scale' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted box height and width at each output
location. This field exists only when object detection task is
specified.
'box/offset' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted y and x offsets at each output location.
'$TASK_NAME/keypoint_heatmap' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, num_keypoints] representing the predicted
keypoint heatmap logits.
'$TASK_NAME/keypoint_offset' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2] representing the predicted keypoint
offsets at each output location.
'$TASK_NAME/keypoint_regression' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2 * num_keypoints] representing the
predicted keypoint regression at each output location.
'segmentation/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_classes] representing the mask logits.
'densepose/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_parts] representing the mask logits for each part.
'densepose/regression' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, 2 * num_parts] representing the DensePose surface
coordinate predictions.
Note the $TASK_NAME is provided by the KeypointEstimation namedtuple
used to differentiate between different keypoint tasks.
"""
features_list = self._feature_extractor(preprocessed_inputs)
predictions = {}
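    # Each prediction head is applied to every feature map produced by the
    # backbone (e.g. both hourglass stages), which is why every entry in the
    # returned dictionary is a list of tensors.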
for head_name, heads in self._prediction_head_dict.items():
predictions[head_name] = [
head(feature) for (feature, head) in zip(features_list, heads)
]
predictions['extracted_features'] = features_list
predictions['preprocessed_inputs'] = preprocessed_inputs
self._batched_prediction_tensor_names = predictions.keys()
return predictions
def loss(
self, prediction_dict, true_image_shapes, scope=None,
maximum_normalized_coordinate=1.1):
"""Computes scalar loss tensors with respect to provided groundtruth.
This function implements the various CenterNet losses.
Args:
prediction_dict: a dictionary holding predicted tensors returned by
"predict" function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
scope: Optional scope name.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1. This is used to check bounds during
converting normalized coordinates to absolute coordinates.
Returns:
A dictionary mapping the keys [
'Loss/object_center',
'Loss/box/scale', (optional)
'Loss/box/offset', (optional)
'Loss/$TASK_NAME/keypoint/heatmap', (optional)
'Loss/$TASK_NAME/keypoint/offset', (optional)
'Loss/$TASK_NAME/keypoint/regression', (optional)
'Loss/segmentation/heatmap', (optional)
'Loss/densepose/heatmap', (optional)
'Loss/densepose/regression', (optional)
        'Loss/track/reid', (optional)
        'Loss/track/offset'] (optional)
scalar tensors corresponding to the losses for different tasks. Note the
$TASK_NAME is provided by the KeypointEstimation namedtuple used to
differentiate between different keypoint tasks.
"""
_, input_height, input_width, _ = _get_shape(
prediction_dict['preprocessed_inputs'], 4)
output_height, output_width = (tf.maximum(input_height // self._stride, 1),
tf.maximum(input_width // self._stride, 1))
# TODO(vighneshb) Explore whether using floor here is safe.
output_true_image_shapes = tf.ceil(
tf.cast(true_image_shapes, tf.float32) / self._stride)
valid_anchor_weights = get_valid_anchor_weights_in_flattened_image(
output_true_image_shapes, output_height, output_width)
valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2)
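    # valid_anchor_weights has shape [batch, output_height * output_width, 1]
    # and zeroes out the spatial locations that fall inside the zero-padded
    # region of each resized image.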
object_center_loss = self._compute_object_center_loss(
object_center_predictions=prediction_dict[OBJECT_CENTER],
input_height=input_height,
input_width=input_width,
per_pixel_weights=valid_anchor_weights,
maximum_normalized_coordinate=maximum_normalized_coordinate)
losses = {
OBJECT_CENTER:
self._center_params.object_center_loss_weight * object_center_loss
}
if self._od_params is not None:
od_losses = self._compute_object_detection_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights,
maximum_normalized_coordinate=maximum_normalized_coordinate)
for key in od_losses:
od_losses[key] = od_losses[key] * self._od_params.task_loss_weight
losses.update(od_losses)
if self._kp_params_dict is not None:
for task_name, params in self._kp_params_dict.items():
kp_losses = self._compute_keypoint_estimation_losses(
task_name=task_name,
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in kp_losses:
kp_losses[key] = kp_losses[key] * params.task_loss_weight
losses.update(kp_losses)
if self._mask_params is not None:
seg_losses = self._compute_segmentation_losses(
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in seg_losses:
seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight
losses.update(seg_losses)
if self._densepose_params is not None:
densepose_losses = self._compute_densepose_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in densepose_losses:
densepose_losses[key] = (
densepose_losses[key] * self._densepose_params.task_loss_weight)
losses.update(densepose_losses)
if self._track_params is not None:
track_losses = self._compute_track_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in track_losses:
track_losses[key] = (
track_losses[key] * self._track_params.task_loss_weight)
losses.update(track_losses)
if self._temporal_offset_params is not None:
offset_losses = self._compute_temporal_offset_loss(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in offset_losses:
offset_losses[key] = (
offset_losses[key] * self._temporal_offset_params.task_loss_weight)
losses.update(offset_losses)
# Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the
# losses will be grouped together in Tensorboard.
return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val)
for key, val in losses.items()])
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Produces boxes given a prediction dict returned by predict().
Although predict returns a list of tensors, only the last tensor in
each list is used for making box predictions.
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
**params: Currently ignored.
Returns:
detections: a dictionary containing the following fields
detection_boxes - A tensor of shape [batch, max_detections, 4]
holding the predicted boxes.
detection_boxes_strided: A tensor of shape [batch_size, num_detections,
4] holding the predicted boxes in absolute coordinates of the
feature extractor's final layer output.
detection_scores: A tensor of shape [batch, max_detections] holding
the predicted score for each box.
detection_multiclass_scores: A tensor of shape [batch, max_detection,
num_classes] holding multiclass score for each box.
detection_classes: An integer tensor of shape [batch, max_detections]
containing the detected class for each box.
num_detections: An integer tensor of shape [batch] containing the
number of detected boxes for each sample in the batch.
detection_keypoints: (Optional) A float tensor of shape [batch,
max_detections, num_keypoints, 2] with normalized keypoints. Any
invalid keypoints have their coordinates and scores set to 0.0.
detection_keypoint_scores: (Optional) A float tensor of shape [batch,
max_detection, num_keypoints] with scores for each keypoint.
detection_masks: (Optional) A uint8 tensor of shape [batch,
max_detections, mask_height, mask_width] with masks for each
detection. Background is specified with 0, and foreground is specified
with positive integers (1 for standard instance segmentation mask, and
1-indexed parts for DensePose task).
detection_surface_coords: (Optional) A float32 tensor of shape [batch,
max_detection, mask_height, mask_width, 2] with DensePose surface
coordinates, in (v, u) format.
detection_embeddings: (Optional) A float tensor of shape [batch,
max_detections, reid_embed_size] containing object embeddings.
"""
object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1])
if true_image_shapes is None:
# If true_image_shapes is not provided, we assume the whole image is valid
# and infer the true_image_shapes from the object_center_prob shape.
batch_size, strided_height, strided_width, _ = _get_shape(
object_center_prob, 4)
true_image_shapes = tf.stack(
[strided_height * self._stride, strided_width * self._stride,
tf.constant(len(self._feature_extractor._channel_means))]) # pylint: disable=protected-access
true_image_shapes = tf.stack([true_image_shapes] * batch_size, axis=0)
else:
# Mask object centers by true_image_shape. [batch, h, w, 1]
object_center_mask = mask_from_true_image_shape(
_get_shape(object_center_prob, 4), true_image_shapes)
object_center_prob *= object_center_mask
# Get x, y and channel indices corresponding to the top indices in the class
# center predictions.
detection_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(
object_center_prob,
max_pool_kernel_size=self._center_params.peak_max_pool_kernel_size,
k=self._center_params.max_box_predictions))
multiclass_scores = tf.gather_nd(
object_center_prob, tf.stack([y_indices, x_indices], -1), batch_dims=1)
num_detections = tf.reduce_sum(
tf.cast(detection_scores > 0, tf.int32), axis=1)
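    # num_detections counts every candidate whose center score is strictly
    # positive; the remaining slots act as padding.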
postprocess_dict = {
fields.DetectionResultFields.detection_scores: detection_scores,
fields.DetectionResultFields.detection_multiclass_scores:
multiclass_scores,
fields.DetectionResultFields.detection_classes: channel_indices,
fields.DetectionResultFields.num_detections: num_detections,
}
if self._output_prediction_dict:
postprocess_dict.update(prediction_dict)
postprocess_dict['true_image_shapes'] = true_image_shapes
boxes_strided = None
if self._od_params:
boxes_strided = (
prediction_tensors_to_boxes(y_indices, x_indices,
prediction_dict[BOX_SCALE][-1],
prediction_dict[BOX_OFFSET][-1]))
boxes = convert_strided_predictions_to_normalized_boxes(
boxes_strided, self._stride, true_image_shapes)
postprocess_dict.update({
fields.DetectionResultFields.detection_boxes: boxes,
'detection_boxes_strided': boxes_strided,
})
if self._kp_params_dict:
# If the model is trained to predict only one class of object and its
# keypoint, we fall back to a simpler postprocessing function which uses
# the ops that are supported by tf.lite on GPU.
clip_keypoints = self._should_clip_keypoints()
if len(self._kp_params_dict) == 1 and self._num_classes == 1:
task_name, kp_params = next(iter(self._kp_params_dict.items()))
keypoint_depths = None
if kp_params.argmax_postprocessing:
keypoints, keypoint_scores = (
prediction_to_keypoints_argmax(
prediction_dict,
object_y_indices=y_indices,
object_x_indices=x_indices,
boxes=boxes_strided,
task_name=task_name,
kp_params=kp_params))
else:
(keypoints, keypoint_scores,
keypoint_depths) = self._postprocess_keypoints_single_class(
prediction_dict, channel_indices, y_indices, x_indices,
boxes_strided, num_detections)
keypoints, keypoint_scores = (
convert_strided_predictions_to_normalized_keypoints(
keypoints, keypoint_scores, self._stride, true_image_shapes,
clip_out_of_frame_keypoints=clip_keypoints))
if keypoint_depths is not None:
postprocess_dict.update({
fields.DetectionResultFields.detection_keypoint_depths:
keypoint_depths
})
else:
# Multi-class keypoint estimation task does not support depth
# estimation.
assert all([
not kp_dict.predict_depth
for kp_dict in self._kp_params_dict.values()
])
keypoints, keypoint_scores = self._postprocess_keypoints_multi_class(
prediction_dict, channel_indices, y_indices, x_indices,
boxes_strided, num_detections)
keypoints, keypoint_scores = (
convert_strided_predictions_to_normalized_keypoints(
keypoints, keypoint_scores, self._stride, true_image_shapes,
clip_out_of_frame_keypoints=clip_keypoints))
postprocess_dict.update({
fields.DetectionResultFields.detection_keypoints: keypoints,
fields.DetectionResultFields.detection_keypoint_scores:
keypoint_scores
})
if self._od_params is None:
        # Still output the box prediction by enclosing the keypoints for
        # evaluation purposes.
boxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes(
keypoints, keypoints_axis=2)
postprocess_dict.update({
fields.DetectionResultFields.detection_boxes: boxes,
})
if self._mask_params:
masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1])
densepose_part_heatmap, densepose_surface_coords = None, None
densepose_class_index = 0
if self._densepose_params:
densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][-1]
densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][-1]
densepose_class_index = self._densepose_params.class_id
instance_masks, surface_coords = (
convert_strided_predictions_to_instance_masks(
boxes, channel_indices, masks, true_image_shapes,
densepose_part_heatmap, densepose_surface_coords,
stride=self._stride, mask_height=self._mask_params.mask_height,
mask_width=self._mask_params.mask_width,
score_threshold=self._mask_params.score_threshold,
densepose_class_index=densepose_class_index))
postprocess_dict[
fields.DetectionResultFields.detection_masks] = instance_masks
if self._densepose_params:
postprocess_dict[
fields.DetectionResultFields.detection_surface_coords] = (
surface_coords)
if self._track_params:
embeddings = self._postprocess_embeddings(prediction_dict,
y_indices, x_indices)
postprocess_dict.update({
fields.DetectionResultFields.detection_embeddings: embeddings
})
if self._temporal_offset_params:
offsets = prediction_tensors_to_temporal_offsets(
y_indices, x_indices,
prediction_dict[TEMPORAL_OFFSET][-1])
postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets
if self._non_max_suppression_fn:
boxes = tf.expand_dims(
postprocess_dict.pop(fields.DetectionResultFields.detection_boxes),
axis=-2)
multiclass_scores = postprocess_dict[
fields.DetectionResultFields.detection_multiclass_scores]
num_classes = tf.shape(multiclass_scores)[2]
class_mask = tf.cast(
tf.one_hot(
postprocess_dict[fields.DetectionResultFields.detection_classes],
depth=num_classes), tf.bool)
      # Suppress the scores of the unselected classes to zero. Otherwise, the
      # downstream NMS ops might be confused and introduce issues.
multiclass_scores = tf.where(
class_mask, multiclass_scores, tf.zeros_like(multiclass_scores))
num_valid_boxes = postprocess_dict.pop(
fields.DetectionResultFields.num_detections)
      # Remove scores and classes as NMS will compute these from the multiclass
      # scores.
postprocess_dict.pop(fields.DetectionResultFields.detection_scores)
postprocess_dict.pop(fields.DetectionResultFields.detection_classes)
(nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,
num_detections) = self._non_max_suppression_fn(
boxes,
multiclass_scores,
additional_fields=postprocess_dict,
num_valid_boxes=num_valid_boxes)
postprocess_dict = nmsed_additional_fields
postprocess_dict[
fields.DetectionResultFields.detection_boxes] = nmsed_boxes
postprocess_dict[
fields.DetectionResultFields.detection_scores] = nmsed_scores
postprocess_dict[
fields.DetectionResultFields.detection_classes] = nmsed_classes
postprocess_dict[
fields.DetectionResultFields.num_detections] = num_detections
postprocess_dict.update(nmsed_additional_fields)
# Perform the rescoring once the NMS is applied to make sure the rescored
# scores won't be washed out by the NMS function.
if self._kp_params_dict:
channel_indices = postprocess_dict[
fields.DetectionResultFields.detection_classes]
detection_scores = postprocess_dict[
fields.DetectionResultFields.detection_scores]
keypoint_scores = postprocess_dict[
fields.DetectionResultFields.detection_keypoint_scores]
# Update instance scores based on keypoints.
scores = self._rescore_instances(
channel_indices, detection_scores, keypoint_scores)
postprocess_dict.update({
fields.DetectionResultFields.detection_scores: scores,
})
return postprocess_dict
def postprocess_single_instance_keypoints(
self,
prediction_dict,
true_image_shapes):
"""Postprocess for predicting single instance keypoints.
    This postprocess function is a special case of predicting the keypoints of
    a single instance in the image (the original CenterNet postprocess supports
    multi-instance prediction). Due to this simplifying assumption, the
    postprocessing function achieves a much faster inference time.
    Here is a short list of the modifications made in this function:
    1) Assume the model predicts keypoints for only a single class.
    2) Assume there is only one instance in the image. If multiple instances
       appear in the image, the model tends to predict the one that is closer
       to the image center (the other ones are treated as background and are
       rejected by the model).
    3) Avoid using top_k ops in the postprocessing logic since they are slower
       than using argmax.
4) The predictions other than the keypoints are ignored, e.g. boxes.
5) The input batch size is assumed to be 1.
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
Returns:
detections: a dictionary containing the following fields
detection_keypoints: A float tensor of shape
[1, 1, num_keypoints, 2] with normalized keypoints. Any invalid
keypoints have their coordinates and scores set to 0.0.
detection_keypoint_scores: A float tensor of shape
[1, 1, num_keypoints] with scores for each keypoint.
"""
    # The number of keypoint tasks is expected to be 1.
assert len(self._kp_params_dict) == 1
task_name, kp_params = next(iter(self._kp_params_dict.items()))
keypoint_heatmap = tf.nn.sigmoid(prediction_dict[get_keypoint_name(
task_name, KEYPOINT_HEATMAP)][-1])
keypoint_offset = prediction_dict[get_keypoint_name(task_name,
KEYPOINT_OFFSET)][-1]
keypoint_regression = prediction_dict[get_keypoint_name(
task_name, KEYPOINT_REGRESSION)][-1]
object_heatmap = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1])
keypoint_depths = None
if kp_params.predict_depth:
keypoint_depths = prediction_dict[get_keypoint_name(
task_name, KEYPOINT_DEPTH)][-1]
keypoints, keypoint_scores, keypoint_depths = (
prediction_to_single_instance_keypoints(
object_heatmap=object_heatmap,
keypoint_heatmap=keypoint_heatmap,
keypoint_offset=keypoint_offset,
keypoint_regression=keypoint_regression,
kp_params=kp_params,
keypoint_depths=keypoint_depths))
keypoints, keypoint_scores = (
convert_strided_predictions_to_normalized_keypoints(
keypoints,
keypoint_scores,
self._stride,
true_image_shapes,
clip_out_of_frame_keypoints=False))
postprocess_dict = {
fields.DetectionResultFields.detection_keypoints: keypoints,
fields.DetectionResultFields.detection_keypoint_scores: keypoint_scores
}
if kp_params.predict_depth:
postprocess_dict.update({
fields.DetectionResultFields.detection_keypoint_depths:
keypoint_depths
})
return postprocess_dict
def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices):
"""Performs postprocessing on embedding predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain embedding prediction
feature maps for tracking task.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
Returns:
embeddings: A [batch_size, max_detection, reid_embed_size] float32
tensor with L2 normalized embeddings extracted from detection box
centers.
"""
embedding_predictions = prediction_dict[TRACK_REID][-1]
embeddings = predicted_embeddings_at_object_centers(
embedding_predictions, y_indices, x_indices)
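    # L2-normalize the embeddings so that similarity between detections can be
    # computed with a simple dot product (cosine similarity) at tracking time.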
embeddings, _ = tf.linalg.normalize(embeddings, axis=-1)
return embeddings
def _scatter_keypoints_to_batch(self, num_ind, kpt_coords_for_example,
kpt_scores_for_example,
instance_inds_for_example, max_detections,
total_num_keypoints):
"""Helper function to convert scattered keypoints into batch."""
def left_fn(kpt_coords_for_example, kpt_scores_for_example,
instance_inds_for_example):
# Scatter into tensor where instances align with original detection
# instances. New shape of keypoint coordinates and scores are
# [1, max_detections, num_total_keypoints, 2] and
# [1, max_detections, num_total_keypoints], respectively.
return _pad_to_full_instance_dim(
kpt_coords_for_example, kpt_scores_for_example,
instance_inds_for_example,
self._center_params.max_box_predictions)
def right_fn():
kpt_coords_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints, 2], dtype=tf.float32)
kpt_scores_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints], dtype=tf.float32)
return (kpt_coords_for_example_all_det,
kpt_scores_for_example_all_det)
left_fn = functools.partial(left_fn, kpt_coords_for_example,
kpt_scores_for_example,
instance_inds_for_example)
# Use dimension values instead of tf.size for tf.lite compatibility.
return tf.cond(num_ind[0] > 0, left_fn, right_fn)
def _postprocess_keypoints_multi_class(self, prediction_dict, classes,
y_indices, x_indices, boxes,
num_detections):
"""Performs postprocessing on keypoint predictions.
This is the most general keypoint postprocessing function which supports
multiple keypoint tasks (e.g. human and dog keypoints) and multiple object
    detection classes. Note that it is the most expensive postprocessing logic
    and is currently not tf.lite/tf.js compatible. See
    _postprocess_keypoints_single_class if you plan to export the model in a
    more portable format.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain keypoint prediction
feature maps for each keypoint task.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with bounding
boxes in (un-normalized) output space.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
Returns:
A tuple of
keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32
tensor with keypoints in the output (strided) coordinate frame.
keypoint_scores: a [batch_size, max_detections, num_total_keypoints]
float32 tensor with keypoint scores.
"""
total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict
in self._kp_params_dict.values())
batch_size, max_detections = _get_shape(classes, 2)
kpt_coords_for_example_list = []
kpt_scores_for_example_list = []
for ex_ind in range(batch_size):
# The tensors that host the keypoint coordinates and scores for all
# instances and all keypoints. They will be updated by scatter_nd_add for
# each keypoint tasks.
kpt_coords_for_example_all_det = tf.zeros(
[max_detections, total_num_keypoints, 2])
kpt_scores_for_example_all_det = tf.zeros(
[max_detections, total_num_keypoints])
for task_name, kp_params in self._kp_params_dict.items():
keypoint_heatmap = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]
keypoint_offsets = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]
keypoint_regression = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]
instance_inds = self._get_instance_indices(
classes, num_detections, ex_ind, kp_params.class_id)
# Gather the feature map locations corresponding to the object class.
y_indices_for_kpt_class = tf.gather(y_indices, instance_inds, axis=1)
x_indices_for_kpt_class = tf.gather(x_indices, instance_inds, axis=1)
if boxes is None:
boxes_for_kpt_class = None
else:
boxes_for_kpt_class = tf.gather(boxes, instance_inds, axis=1)
# Postprocess keypoints and scores for class and single image. Shapes
# are [1, num_instances_i, num_keypoints_i, 2] and
# [1, num_instances_i, num_keypoints_i], respectively. Note that
        # num_instances_i and num_keypoints_i refer to the number of
# instances and keypoints for class i, respectively.
(kpt_coords_for_class, kpt_scores_for_class, _) = (
self._postprocess_keypoints_for_class_and_image(
keypoint_heatmap,
keypoint_offsets,
keypoint_regression,
classes,
y_indices_for_kpt_class,
x_indices_for_kpt_class,
boxes_for_kpt_class,
ex_ind,
kp_params,
))
# Prepare the indices for scatter_nd. The resulting combined_inds has
# the shape of [num_instances_i * num_keypoints_i, 2], where the first
# column corresponds to the instance IDs and the second column
# corresponds to the keypoint IDs.
kpt_inds = tf.constant(kp_params.keypoint_indices, dtype=tf.int32)
kpt_inds = tf.expand_dims(kpt_inds, axis=0)
instance_inds_expand = tf.expand_dims(instance_inds, axis=-1)
kpt_inds_expand = kpt_inds * tf.ones_like(instance_inds_expand)
instance_inds_expand = instance_inds_expand * tf.ones_like(kpt_inds)
combined_inds = tf.stack(
[instance_inds_expand, kpt_inds_expand], axis=2)
combined_inds = tf.reshape(combined_inds, [-1, 2])
# Reshape the keypoint coordinates/scores to [num_instances_i *
# num_keypoints_i, 2]/[num_instances_i * num_keypoints_i] to be used
# by scatter_nd_add.
kpt_coords_for_class = tf.reshape(kpt_coords_for_class, [-1, 2])
kpt_scores_for_class = tf.reshape(kpt_scores_for_class, [-1])
kpt_coords_for_example_all_det = tf.tensor_scatter_nd_add(
kpt_coords_for_example_all_det,
combined_inds, kpt_coords_for_class)
kpt_scores_for_example_all_det = tf.tensor_scatter_nd_add(
kpt_scores_for_example_all_det,
combined_inds, kpt_scores_for_class)
kpt_coords_for_example_list.append(
tf.expand_dims(kpt_coords_for_example_all_det, axis=0))
kpt_scores_for_example_list.append(
tf.expand_dims(kpt_scores_for_example_all_det, axis=0))
# Concatenate all keypoints and scores from all examples in the batch.
# Shapes are [batch_size, max_detections, num_total_keypoints, 2] and
# [batch_size, max_detections, num_total_keypoints], respectively.
keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
return keypoints, keypoint_scores
def _postprocess_keypoints_single_class(self, prediction_dict, classes,
y_indices, x_indices, boxes,
num_detections):
"""Performs postprocessing on keypoint predictions (single class only).
    This function handles the special case of the keypoint task in which the
    model predicts only one class of bounding box/keypoint (e.g. person).
    Under this assumption, the function uses only tf.lite-supported ops and
    should run faster.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain keypoint prediction
feature maps for each keypoint task.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with bounding
boxes in (un-normalized) output space.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
Returns:
A tuple of
keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32
tensor with keypoints in the output (strided) coordinate frame.
keypoint_scores: a [batch_size, max_detections, num_total_keypoints]
float32 tensor with keypoint scores.
"""
    # This function only works when there is only one keypoint task and the
    # number of classes equals one. For more general use cases, please use
    # _postprocess_keypoints_multi_class instead.
assert len(self._kp_params_dict) == 1 and self._num_classes == 1
task_name, kp_params = next(iter(self._kp_params_dict.items()))
keypoint_heatmap = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]
keypoint_offsets = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]
keypoint_regression = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]
keypoint_depth_predictions = None
if kp_params.predict_depth:
keypoint_depth_predictions = prediction_dict[get_keypoint_name(
task_name, KEYPOINT_DEPTH)][-1]
batch_size, _ = _get_shape(classes, 2)
kpt_coords_for_example_list = []
kpt_scores_for_example_list = []
kpt_depths_for_example_list = []
for ex_ind in range(batch_size):
# Postprocess keypoints and scores for class and single image. Shapes
# are [1, max_detections, num_keypoints, 2] and
# [1, max_detections, num_keypoints], respectively.
(kpt_coords_for_class, kpt_scores_for_class, kpt_depths_for_class) = (
self._postprocess_keypoints_for_class_and_image(
keypoint_heatmap,
keypoint_offsets,
keypoint_regression,
classes,
y_indices,
x_indices,
boxes,
ex_ind,
kp_params,
keypoint_depth_predictions=keypoint_depth_predictions))
kpt_coords_for_example_list.append(kpt_coords_for_class)
kpt_scores_for_example_list.append(kpt_scores_for_class)
kpt_depths_for_example_list.append(kpt_depths_for_class)
# Concatenate all keypoints and scores from all examples in the batch.
# Shapes are [batch_size, max_detections, num_keypoints, 2] and
# [batch_size, max_detections, num_keypoints], respectively.
keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
keypoint_depths = None
if kp_params.predict_depth:
keypoint_depths = tf.concat(kpt_depths_for_example_list, axis=0)
return keypoints, keypoint_scores, keypoint_depths
def _get_instance_indices(self, classes, num_detections, batch_index,
class_id):
"""Gets the instance indices that match the target class ID.
Args:
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
batch_index: An integer specifying the index for an example in the batch.
      class_id: An integer id of the target class whose instances should be
        selected.
Returns:
instance_inds: A [num_instances] int32 tensor where each element indicates
the instance location within the `classes` tensor. This is useful to
associate the refined keypoints with the original detections (i.e.
        boxes).
"""
classes = classes[batch_index:batch_index+1, ...]
_, max_detections = shape_utils.combined_static_and_dynamic_shape(
classes)
# Get the detection indices corresponding to the target class.
# Call tf.math.equal with matched tensor shape to make it tf.lite
# compatible.
valid_detections_with_kpt_class = tf.math.logical_and(
tf.range(max_detections) < num_detections[batch_index],
tf.math.equal(classes[0], tf.fill(classes[0].shape, class_id)))
instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]
# Cast the indices tensor to int32 for tf.lite compatibility.
return tf.cast(instance_inds, tf.int32)
def _postprocess_keypoints_for_class_and_image(
self,
keypoint_heatmap,
keypoint_offsets,
keypoint_regression,
classes,
y_indices,
x_indices,
boxes,
batch_index,
kp_params,
keypoint_depth_predictions=None):
"""Postprocess keypoints for a single image and class.
Args:
keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32
tensor with keypoint heatmaps.
keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with
local offsets to keypoint centers.
keypoint_regression: A [batch_size, height, width, 2 * num_keypoints]
float32 tensor with regressed offsets to all keypoints.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with detected
boxes in the output (strided) frame.
batch_index: An integer specifying the index for an example in the batch.
kp_params: A `KeypointEstimationParams` object with parameters for a
single keypoint class.
keypoint_depth_predictions: (optional) A [batch_size, height, width, 1]
float32 tensor representing the keypoint depth prediction.
Returns:
A tuple of
refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor
with refined keypoints for a single class in a single image, expressed
in the output (strided) coordinate frame. Note that `num_instances` is a
dynamic dimension, and corresponds to the number of valid detections
for the specific class.
refined_scores: A [1, num_instances, num_keypoints] float32 tensor with
keypoint scores.
refined_depths: A [1, num_instances, num_keypoints] float32 tensor with
keypoint depths. Return None if the input keypoint_depth_predictions is
None.
"""
num_keypoints = len(kp_params.keypoint_indices)
keypoint_heatmap = tf.nn.sigmoid(
keypoint_heatmap[batch_index:batch_index+1, ...])
keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...]
keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...]
keypoint_depths = None
if keypoint_depth_predictions is not None:
keypoint_depths = keypoint_depth_predictions[batch_index:batch_index + 1,
...]
y_indices = y_indices[batch_index:batch_index+1, ...]
x_indices = x_indices[batch_index:batch_index+1, ...]
if boxes is None:
boxes_slice = None
else:
boxes_slice = boxes[batch_index:batch_index+1, ...]
# Gather the regressed keypoints. Final tensor has shape
# [1, num_instances, num_keypoints, 2].
regressed_keypoints_for_objects = regressed_keypoints_at_object_centers(
keypoint_regression, y_indices, x_indices)
regressed_keypoints_for_objects = tf.reshape(
regressed_keypoints_for_objects, [1, -1, num_keypoints, 2])
# Get the candidate keypoints and scores.
# The shape of keypoint_candidates and keypoint_scores is:
# [1, num_candidates_per_keypoint, num_keypoints, 2] and
# [1, num_candidates_per_keypoint, num_keypoints], respectively.
(keypoint_candidates, keypoint_scores, num_keypoint_candidates,
keypoint_depth_candidates) = (
prediction_tensors_to_keypoint_candidates(
keypoint_heatmap,
keypoint_offsets,
keypoint_score_threshold=(
kp_params.keypoint_candidate_score_threshold),
max_pool_kernel_size=kp_params.peak_max_pool_kernel_size,
max_candidates=kp_params.num_candidates_per_keypoint,
keypoint_depths=keypoint_depths))
kpts_std_dev_postprocess = [
s * kp_params.std_dev_multiplier for s in kp_params.keypoint_std_dev
]
# Get the refined keypoints and scores, of shape
# [1, num_instances, num_keypoints, 2] and
# [1, num_instances, num_keypoints], respectively.
(refined_keypoints, refined_scores, refined_depths) = refine_keypoints(
regressed_keypoints_for_objects,
keypoint_candidates,
keypoint_scores,
num_keypoint_candidates,
bboxes=boxes_slice,
unmatched_keypoint_score=kp_params.unmatched_keypoint_score,
box_scale=kp_params.box_scale,
candidate_search_scale=kp_params.candidate_search_scale,
candidate_ranking_mode=kp_params.candidate_ranking_mode,
score_distance_offset=kp_params.score_distance_offset,
keypoint_depth_candidates=keypoint_depth_candidates,
keypoint_score_threshold=(kp_params.keypoint_candidate_score_threshold),
score_distance_multiplier=kp_params.score_distance_multiplier,
keypoint_std_dev=kpts_std_dev_postprocess)
return refined_keypoints, refined_scores, refined_depths
def regularization_losses(self):
return []
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
raise RuntimeError('CenterNetMetaArch not supported under TF1.x.')
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of Trackable objects to load from a foreign checkpoint.
Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
    should have the same parameters as this detection model, with the exception
    of the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running Tensorflow 2, whereas restore_map (not implemented
in CenterNet) is intended to be used to restore Slim-based models when
running Tensorflow 1.x.
TODO(jonathanhuang): Make this function consistent with other
meta-architectures.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
        Valid values: `detection`, `classification`, `full`.
Default 'detection'.
'detection': used when loading models pre-trained on other detection
tasks. With this checkpoint type the weights of the feature extractor
are expected under the attribute 'feature_extractor'.
'classification': used when loading models pre-trained on an image
classification task. Note that only the encoder section of the network
is loaded and not the upsampling layers. With this checkpoint type,
the weights of only the encoder section are expected under the
attribute 'feature_extractor'.
        'full': used when restoring a checkpoint of this full CenterNet model
          (e.g. one saved during CenterNet training). With this checkpoint
          type, the entire model is expected under the attribute 'model'. The
          previously supported 'fine_tune' type is no longer accepted and
          raises an error directing users to 'detection' instead.
For more details, see the tensorflow section on Loading mechanics.
https://www.tensorflow.org/guide/checkpoint#loading_mechanics
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
"""
if fine_tune_checkpoint_type == 'detection':
feature_extractor_model = tf.train.Checkpoint(
_feature_extractor=self._feature_extractor)
return {'model': feature_extractor_model}
elif fine_tune_checkpoint_type == 'classification':
return {
'feature_extractor':
self._feature_extractor.classification_backbone
}
elif fine_tune_checkpoint_type == 'full':
return {'model': self}
elif fine_tune_checkpoint_type == 'fine_tune':
raise ValueError(('"fine_tune" is no longer supported for CenterNet. '
'Please set fine_tune_checkpoint_type to "detection"'
' which has the same functionality. If you are using'
' the ExtremeNet checkpoint, download the new version'
' from the model zoo.'))
else:
raise ValueError('Unknown fine tune checkpoint type {}'.format(
fine_tune_checkpoint_type))
def updates(self):
if tf_version.is_tf2():
raise RuntimeError('This model is intended to be used with model_lib_v2 '
'which does not support updates()')
else:
update_ops = []
slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# Copy the slim ops to avoid modifying the collection
if slim_update_ops:
update_ops.extend(slim_update_ops)
return update_ops
| 225528 | 45.433807 | 110 | py |
models | models-master/research/object_detection/meta_architectures/context_rcnn_meta_arch.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Context R-CNN meta-architecture definition.
This adds the ability to use attention over contextual features within the
Faster R-CNN object detection framework to improve object detection
performance.
See https://arxiv.org/abs/1912.03538 for more information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow.compat.v1 as tf
from object_detection.core import box_predictor
from object_detection.core import standard_fields as fields
from object_detection.meta_architectures import context_rcnn_lib
from object_detection.meta_architectures import context_rcnn_lib_tf2
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.protos import faster_rcnn_pb2
from object_detection.utils import ops
from object_detection.utils import tf_version
_UNINITIALIZED_FEATURE_EXTRACTOR = '__uninitialized__'
class ContextRCNNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch):
"""Context R-CNN Meta-architecture definition."""
def __init__(self,
is_training,
num_classes,
image_resizer_fn,
feature_extractor,
number_of_stages,
first_stage_anchor_generator,
first_stage_target_assigner,
first_stage_atrous_rate,
first_stage_box_predictor_arg_scope_fn,
first_stage_box_predictor_kernel_size,
first_stage_box_predictor_depth,
first_stage_minibatch_size,
first_stage_sampler,
first_stage_non_max_suppression_fn,
first_stage_max_proposals,
first_stage_localization_loss_weight,
first_stage_objectness_loss_weight,
crop_and_resize_fn,
initial_crop_size,
maxpool_kernel_size,
maxpool_stride,
second_stage_target_assigner,
second_stage_mask_rcnn_box_predictor,
second_stage_batch_size,
second_stage_sampler,
second_stage_non_max_suppression_fn,
second_stage_score_conversion_fn,
second_stage_localization_loss_weight,
second_stage_classification_loss_weight,
second_stage_classification_loss,
second_stage_mask_prediction_loss_weight=1.0,
hard_example_miner=None,
parallel_iterations=16,
add_summaries=True,
clip_anchors_to_image=False,
use_static_shapes=False,
resize_masks=True,
freeze_batchnorm=False,
return_raw_detections_during_predict=False,
output_final_box_features=False,
output_final_box_rpn_features=False,
attention_bottleneck_dimension=None,
attention_temperature=None,
use_self_attention=False,
use_long_term_attention=True,
self_attention_in_sequence=False,
num_attention_heads=1,
num_attention_layers=1,
attention_position=(
faster_rcnn_pb2.AttentionPosition.POST_BOX_CLASSIFIER)
):
"""ContextRCNNMetaArch Constructor.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
num_classes: Number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
image_resizer_fn: A callable for image resizing. This callable
takes a rank-3 image tensor of shape [height, width, channels]
(corresponding to a single image), an optional rank-3 instance mask
tensor of shape [num_masks, height, width] and returns a resized rank-3
image tensor, a resized mask tensor if one was provided in the input. In
addition this callable must also return a 1-D tensor of the form
[height, width, channels] containing the size of the true image, as the
image resizer can perform zero padding. See protos/image_resizer.proto.
feature_extractor: A FasterRCNNFeatureExtractor object.
      number_of_stages: An integer taking values in {1, 2, 3}. If
1, the function will construct only the Region Proposal Network (RPN)
part of the model. If 2, the function will perform box refinement and
other auxiliary predictions all in the second stage. If 3, it will
extract features from refined boxes and perform the auxiliary
predictions on the non-maximum suppressed refined boxes.
If is_training is true and the value of number_of_stages is 3, it is
reduced to 2 since all the model heads are trained in parallel in second
stage during training.
first_stage_anchor_generator: An anchor_generator.AnchorGenerator object
(note that currently we only support
grid_anchor_generator.GridAnchorGenerator objects)
first_stage_target_assigner: Target assigner to use for first stage of
Faster R-CNN (RPN).
first_stage_atrous_rate: A single integer indicating the atrous rate for
the single convolution op which is applied to the `rpn_features_to_crop`
tensor to obtain a tensor to be used for box prediction. Some feature
extractors optionally allow for producing feature maps computed at
denser resolutions. The atrous rate is used to compensate for the
denser feature maps by using an effectively larger receptive field.
(This should typically be set to 1).
first_stage_box_predictor_arg_scope_fn: Either a
Keras layer hyperparams object or a function to construct tf-slim
arg_scope for conv2d, separable_conv2d and fully_connected ops. Used
for the RPN box predictor. If it is a keras hyperparams object the
RPN box predictor will be a Keras model. If it is a function to
construct an arg scope it will be a tf-slim box predictor.
first_stage_box_predictor_kernel_size: Kernel size to use for the
convolution op just prior to RPN box predictions.
first_stage_box_predictor_depth: Output depth for the convolution op
just prior to RPN box predictions.
first_stage_minibatch_size: The "batch size" to use for computing the
objectness and location loss of the region proposal network. This
"batch size" refers to the number of anchors selected as contributing
to the loss function for any given image within the image batch and is
only called "batch_size" due to terminology from the Faster R-CNN paper.
first_stage_sampler: Sampler to use for first stage loss (RPN loss).
first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
callable that takes `boxes`, `scores` and optional `clip_window`(with
all other inputs already set) and returns a dictionary containing
tensors with keys: `detection_boxes`, `detection_scores`,
`detection_classes`, `num_detections`. This is used to perform non max
suppression on the boxes predicted by the Region Proposal Network
(RPN).
See `post_processing.batch_multiclass_non_max_suppression` for the type
and shape of these tensors.
first_stage_max_proposals: Maximum number of boxes to retain after
performing Non-Max Suppression (NMS) on the boxes predicted by the
Region Proposal Network (RPN).
first_stage_localization_loss_weight: A float
first_stage_objectness_loss_weight: A float
crop_and_resize_fn: A differentiable resampler to use for cropping RPN
proposal features.
initial_crop_size: A single integer indicating the output size
(width and height are set to be the same) of the initial bilinear
interpolation based cropping during ROI pooling.
maxpool_kernel_size: A single integer indicating the kernel size of the
max pool op on the cropped feature map during ROI pooling.
maxpool_stride: A single integer indicating the stride of the max pool
op on the cropped feature map during ROI pooling.
second_stage_target_assigner: Target assigner to use for second stage of
Faster R-CNN. If the model is configured with multiple prediction heads,
this target assigner is used to generate targets for all heads (with the
correct `unmatched_class_label`).
second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for
the second stage.
second_stage_batch_size: The batch size used for computing the
classification and refined location loss of the box classifier. This
"batch size" refers to the number of proposals selected as contributing
to the loss function for any given image within the image batch and is
only called "batch_size" due to terminology from the Faster R-CNN paper.
second_stage_sampler: Sampler to use for second stage loss (box
classifier loss).
second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
callable that takes `boxes`, `scores`, optional `clip_window` and
optional (kwarg) `mask` inputs (with all other inputs already set)
and returns a dictionary containing tensors with keys:
`detection_boxes`, `detection_scores`, `detection_classes`,
`num_detections`, and (optionally) `detection_masks`. See
`post_processing.batch_multiclass_non_max_suppression` for the type and
shape of these tensors.
second_stage_score_conversion_fn: Callable elementwise nonlinearity
(that takes tensors as inputs and returns tensors). This is usually
used to convert logits to probabilities.
second_stage_localization_loss_weight: A float indicating the scale factor
for second stage localization loss.
second_stage_classification_loss_weight: A float indicating the scale
factor for second stage classification loss.
second_stage_classification_loss: Classification loss used by the second
stage classifier. Either losses.WeightedSigmoidClassificationLoss or
losses.WeightedSoftmaxClassificationLoss.
second_stage_mask_prediction_loss_weight: A float indicating the scale
factor for second stage mask prediction loss. This is applicable only if
second stage box predictor is configured to predict masks.
hard_example_miner: A losses.HardExampleMiner object (can be None).
parallel_iterations: (Optional) The number of iterations allowed to run
in parallel for calls to tf.map_fn.
add_summaries: boolean (default: True) controlling whether summary ops
should be added to tensorflow graph.
clip_anchors_to_image: Normally, anchors generated for a given image size
are pruned during training if they lie outside the image window. This
option clips the anchors to be within the image instead of pruning.
use_static_shapes: If True, uses implementation of ops with static shape
guarantees.
      resize_masks: Indicates whether the masks present in the groundtruth
        should be resized in the model with `image_resizer_fn`.
freeze_batchnorm: Whether to freeze batch norm parameters in the first
stage box predictor during training or not. When training with a small
batch size (e.g. 1), it is desirable to freeze batch norm update and
use pretrained batch norm params.
return_raw_detections_during_predict: Whether to return raw detection
boxes in the predict() method. These are decoded boxes that have not
been through postprocessing (i.e. NMS). Default False.
output_final_box_features: Whether to output final box features. If true,
it crops the feature map based on the final box prediction and returns
it in the output dict as detection_features.
output_final_box_rpn_features: Whether to output rpn box features. If
true, it crops the rpn feature map based on the final box prediction and
returns it in the output dict as detection_features.
attention_bottleneck_dimension: A single integer. The bottleneck feature
dimension of the attention block.
attention_temperature: A single float. The attention temperature.
use_self_attention: Whether to use self-attention within the box features
in the current frame.
use_long_term_attention: Whether to use attention into the context
features.
self_attention_in_sequence: Whether self attention and long term attention
are in sequence or parallel.
num_attention_heads: The number of attention heads to use.
num_attention_layers: The number of attention layers to use.
attention_position: Whether attention should occur post rpn or post
box classifier. Options are specified in the faster rcnn proto,
default is post box classifier.
Raises:
ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at
training time.
ValueError: If first_stage_anchor_generator is not of type
grid_anchor_generator.GridAnchorGenerator.
"""
super(ContextRCNNMetaArch, self).__init__(
is_training,
num_classes,
image_resizer_fn,
feature_extractor,
number_of_stages,
first_stage_anchor_generator,
first_stage_target_assigner,
first_stage_atrous_rate,
first_stage_box_predictor_arg_scope_fn,
first_stage_box_predictor_kernel_size,
first_stage_box_predictor_depth,
first_stage_minibatch_size,
first_stage_sampler,
first_stage_non_max_suppression_fn,
first_stage_max_proposals,
first_stage_localization_loss_weight,
first_stage_objectness_loss_weight,
crop_and_resize_fn,
initial_crop_size,
maxpool_kernel_size,
maxpool_stride,
second_stage_target_assigner,
second_stage_mask_rcnn_box_predictor,
second_stage_batch_size,
second_stage_sampler,
second_stage_non_max_suppression_fn,
second_stage_score_conversion_fn,
second_stage_localization_loss_weight,
second_stage_classification_loss_weight,
second_stage_classification_loss,
second_stage_mask_prediction_loss_weight=(
second_stage_mask_prediction_loss_weight),
hard_example_miner=hard_example_miner,
parallel_iterations=parallel_iterations,
add_summaries=add_summaries,
clip_anchors_to_image=clip_anchors_to_image,
use_static_shapes=use_static_shapes,
resize_masks=resize_masks,
freeze_batchnorm=freeze_batchnorm,
return_raw_detections_during_predict=(
return_raw_detections_during_predict),
output_final_box_features=output_final_box_features,
output_final_box_rpn_features=output_final_box_rpn_features)
self._attention_position = attention_position
if tf_version.is_tf1():
self._context_feature_extract_fn = functools.partial(
context_rcnn_lib._compute_box_context_attention,
bottleneck_dimension=attention_bottleneck_dimension,
attention_temperature=attention_temperature,
is_training=is_training,
max_num_proposals=self.max_num_proposals,
use_self_attention=use_self_attention,
use_long_term_attention=use_long_term_attention,
self_attention_in_sequence=self_attention_in_sequence,
num_attention_heads=num_attention_heads,
num_attention_layers=num_attention_layers)
else:
if use_self_attention:
raise NotImplementedError
if self_attention_in_sequence:
raise NotImplementedError
if not use_long_term_attention:
raise NotImplementedError
if num_attention_heads > 1:
raise NotImplementedError
if num_attention_layers > 1:
raise NotImplementedError
self._context_feature_extract_fn = context_rcnn_lib_tf2.AttentionBlock(
bottleneck_dimension=attention_bottleneck_dimension,
attention_temperature=attention_temperature,
is_training=is_training,
max_num_proposals=self.max_num_proposals)
@staticmethod
def get_side_inputs(features):
"""Overrides the get_side_inputs function in the base class.
This function returns context_features and valid_context_size, which will be
used in the _compute_second_stage_input_feature_maps function.
Args:
features: A dictionary of tensors.
Returns:
A dictionary of tensors contains context_features and valid_context_size.
Raises:
ValueError: If context_features or valid_context_size is not in the
features.
"""
if (fields.InputDataFields.context_features not in features or
fields.InputDataFields.valid_context_size not in features):
raise ValueError(
'Please make sure context_features and valid_context_size are in the '
'features')
return {
fields.InputDataFields.context_features:
features[fields.InputDataFields.context_features],
fields.InputDataFields.valid_context_size:
features[fields.InputDataFields.valid_context_size]
}
def _predict_second_stage(self, rpn_box_encodings,
rpn_objectness_predictions_with_background,
rpn_features_to_crop, anchors, image_shape,
true_image_shapes, **side_inputs):
"""Predicts the output tensors from second stage of Faster R-CNN.
Args:
rpn_box_encodings: 3-D float tensor of shape
[batch_size, num_valid_anchors, self._box_coder.code_size] containing
predicted boxes.
rpn_objectness_predictions_with_background: 2-D float tensor of shape
[batch_size, num_valid_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
      rpn_features_to_crop: A list of 4-D float32 or bfloat16 tensors with
        shape [batch_size, height_i, width_i, depth] representing image
        features to crop using the proposal boxes predicted by the RPN.
anchors: 2-D float tensor of shape
[num_anchors, self._box_coder.code_size].
      image_shape: A 1-D int32 tensor of size [4] containing the image shape.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**side_inputs: additional tensors that are required by the network.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D float32 tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using a
shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
2) class_predictions_with_background: a 3-D float32 tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) num_proposals: An int32 tensor of shape [batch_size] representing the
number of proposals generated by the RPN. `num_proposals` allows us
to keep track of which entries are to be treated as zero paddings and
which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
4) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
5) proposal_boxes_normalized: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing decoded proposal
bounding boxes in normalized coordinates. Can be used to override the
boxes proposed by the RPN, thus enabling one to extract features and
get box classification and prediction for externally selected areas
of the image.
6) box_classifier_features: a 4-D float32/bfloat16 tensor
representing the features for each proposal.
If self._return_raw_detections_during_predict is True, the dictionary
will also contain:
7) raw_detection_boxes: a 4-D float32 tensor with shape
[batch_size, self.max_num_proposals, num_classes, 4] in normalized
coordinates.
8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape
[batch_size, self.max_num_proposals, num_classes].
"""
proposal_boxes_normalized, num_proposals = self._proposal_postprocess(
rpn_box_encodings, rpn_objectness_predictions_with_background, anchors,
image_shape, true_image_shapes)
prediction_dict = self._box_prediction(rpn_features_to_crop,
proposal_boxes_normalized,
image_shape, true_image_shapes,
num_proposals,
**side_inputs)
prediction_dict['num_proposals'] = num_proposals
return prediction_dict
def _box_prediction(self, rpn_features_to_crop, proposal_boxes_normalized,
image_shape, true_image_shapes, num_proposals,
**side_inputs):
"""Predicts the output tensors from second stage of Faster R-CNN.
Args:
      rpn_features_to_crop: A list of 4-D float32 or bfloat16 tensors with
        shape [batch_size, height_i, width_i, depth] representing image
        features to crop using the proposal boxes predicted by the RPN.
proposal_boxes_normalized: A float tensor with shape [batch_size,
max_num_proposals, 4] representing the (potentially zero padded)
proposal boxes for all images in the batch. These boxes are represented
as normalized coordinates.
      image_shape: A 1-D int32 tensor of size [4] containing the image shape.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
num_proposals: The number of valid box proposals.
**side_inputs: additional tensors that are required by the network.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D float32 tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using a
shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
2) class_predictions_with_background: a 3-D float32 tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
4) proposal_boxes_normalized: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing decoded proposal
bounding boxes in normalized coordinates. Can be used to override the
boxes proposed by the RPN, thus enabling one to extract features and
get box classification and prediction for externally selected areas
of the image.
5) box_classifier_features: a 4-D float32/bfloat16 tensor
representing the features for each proposal.
If self._return_raw_detections_during_predict is True, the dictionary
will also contain:
6) raw_detection_boxes: a 4-D float32 tensor with shape
[batch_size, self.max_num_proposals, num_classes, 4] in normalized
coordinates.
7) raw_detection_feature_map_indices: a 3-D int32 tensor with shape
[batch_size, self.max_num_proposals, num_classes].
8) final_anchors: a 3-D float tensor of shape [batch_size,
self.max_num_proposals, 4] containing the reference anchors for raw
detection boxes in normalized coordinates.
"""
flattened_proposal_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, proposal_boxes_normalized,
image_shape, num_proposals, **side_inputs))
box_classifier_features = self._extract_box_classifier_features(
flattened_proposal_feature_maps, num_proposals, **side_inputs)
if self._mask_rcnn_box_predictor.is_keras_model:
box_predictions = self._mask_rcnn_box_predictor(
[box_classifier_features],
prediction_stage=2)
else:
box_predictions = self._mask_rcnn_box_predictor.predict(
[box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=2)
refined_box_encodings = tf.squeeze(
box_predictions[box_predictor.BOX_ENCODINGS],
axis=1, name='all_refined_box_encodings')
class_predictions_with_background = tf.squeeze(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1, name='all_class_predictions_with_background')
absolute_proposal_boxes = ops.normalized_to_image_coordinates(
proposal_boxes_normalized, image_shape, self._parallel_iterations)
prediction_dict = {
'refined_box_encodings': tf.cast(refined_box_encodings,
dtype=tf.float32),
'class_predictions_with_background':
tf.cast(class_predictions_with_background, dtype=tf.float32),
'proposal_boxes': absolute_proposal_boxes,
'box_classifier_features': box_classifier_features,
'proposal_boxes_normalized': proposal_boxes_normalized,
'final_anchors': proposal_boxes_normalized
}
if self._return_raw_detections_during_predict:
prediction_dict.update(self._raw_detections_and_feature_map_inds(
refined_box_encodings, absolute_proposal_boxes, true_image_shapes))
return prediction_dict
def _compute_second_stage_input_feature_maps(self, features_to_crop,
proposal_boxes_normalized,
image_shape,
num_proposals,
context_features,
valid_context_size):
"""Crops to a set of proposals from the feature map for a batch of images.
This function overrides the one in the FasterRCNNMetaArch. Aside from
cropping and resizing the feature maps, which is done in the parent class,
it adds context attention features to the box features.
Args:
features_to_crop: A float32 Tensor with shape [batch_size, height, width,
depth]
proposal_boxes_normalized: A float32 Tensor with shape [batch_size,
num_proposals, box_code_size] containing proposal boxes in normalized
coordinates.
      image_shape: A 1-D int32 tensor of size [4] containing the image shape.
num_proposals: The number of valid box proposals.
context_features: A float Tensor of shape [batch_size, context_size,
num_context_features].
      valid_context_size: An int32 Tensor of shape [batch_size].
Returns:
A float32 Tensor with shape [K, new_height, new_width, depth].
"""
del image_shape
box_features = self._crop_and_resize_fn(
features_to_crop, proposal_boxes_normalized, None,
[self._initial_crop_size, self._initial_crop_size])
flattened_box_features = self._flatten_first_two_dimensions(box_features)
flattened_box_features = self._maxpool_layer(flattened_box_features)
if self._attention_position == (
faster_rcnn_pb2.AttentionPosition.POST_RPN):
attention_features = self._context_feature_extract_fn(
box_features=flattened_box_features,
num_proposals=num_proposals,
context_features=context_features,
valid_context_size=valid_context_size)
      # Add the attention features to the box features.
flattened_box_features += self._flatten_first_two_dimensions(
attention_features)
return flattened_box_features
def _extract_box_classifier_features(
self, flattened_box_features, num_proposals, context_features,
valid_context_size,
attention_position=(
faster_rcnn_pb2.AttentionPosition.POST_BOX_CLASSIFIER)):
if self._feature_extractor_for_box_classifier_features == (
_UNINITIALIZED_FEATURE_EXTRACTOR):
self._feature_extractor_for_box_classifier_features = (
self._feature_extractor.get_box_classifier_feature_extractor_model(
name=self.second_stage_feature_extractor_scope))
if self._feature_extractor_for_box_classifier_features:
box_classifier_features = (
self._feature_extractor_for_box_classifier_features(
flattened_box_features))
else:
box_classifier_features = (
self._feature_extractor.extract_box_classifier_features(
flattened_box_features,
scope=self.second_stage_feature_extractor_scope))
if self._attention_position == (
faster_rcnn_pb2.AttentionPosition.POST_BOX_CLASSIFIER):
attention_features = self._context_feature_extract_fn(
box_features=box_classifier_features,
num_proposals=num_proposals,
context_features=context_features,
valid_context_size=valid_context_size)
      # Add the attention features to the box features.
box_classifier_features += self._flatten_first_two_dimensions(
attention_features)
return box_classifier_features
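# Illustrative usage sketch only: how the context side inputs documented in
# get_side_inputs() above reach predict(). `context_rcnn_model` is assumed to
# be an already-constructed ContextRCNNMetaArch (e.g. built via
# object_detection.builders.model_builder); the tensors follow the shapes
# described in the docstrings of this file.
def _example_side_inputs_flow(context_rcnn_model, preprocessed_inputs,
                              true_image_shapes, context_features,
                              valid_context_size):
  """Runs predict() with context side inputs (for exposition only)."""
  features = {
      fields.InputDataFields.image: preprocessed_inputs,
      fields.InputDataFields.context_features: context_features,
      fields.InputDataFields.valid_context_size: valid_context_size,
  }
  side_inputs = context_rcnn_model.get_side_inputs(features)
  return context_rcnn_model.predict(preprocessed_inputs, true_image_shapes,
                                     **side_inputs)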
| 31,821 | 49.9152 | 80 | py |
models | models-master/research/object_detection/meta_architectures/faster_rcnn_meta_arch.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster R-CNN meta-architecture definition.
General tensorflow implementation of Faster R-CNN detection models.
See Faster R-CNN: Ren, Shaoqing, et al.
"Faster R-CNN: Towards real-time object detection with region proposal
networks." Advances in neural information processing systems. 2015.
We allow for three modes: number_of_stages={1, 2, 3}. In case of 1 stage,
all of the user facing methods (e.g., predict, postprocess, loss) can be used as
if the model consisted only of the RPN, returning class agnostic proposals
(these can be thought of as approximate detections with no associated class
information). In case of 2 stages, proposals are computed, then passed
through a second stage "box classifier" to yield (multi-class) detections.
Finally, in case of 3 stages which is only used during eval, proposals are
computed, then passed through a second stage "box classifier" that will compute
refined boxes and classes, and then features are pooled from the refined and
non-maximum suppressed boxes and are passed through the box classifier again.
If the number of stages is 3 during training, it is reduced to two
automatically.
Implementations of Faster R-CNN models must define a new
FasterRCNNFeatureExtractor and override three methods: `preprocess`,
`_extract_proposal_features` (the first stage of the model), and
`_extract_box_classifier_features` (the second stage of the model). Optionally,
the `restore_fn` method can be overridden. See tests for an example.
A few important notes:
+ Batching conventions: We support batched inference and training where
all images within a batch have the same resolution. Batch sizes are determined
dynamically via the shape of the input tensors (rather than being specified
directly in, e.g., the model constructor).
A complication is that due to non-max suppression, we are not guaranteed to get
the same number of proposals from the first stage RPN (region proposal network)
for each image (though in practice, we should often get the same number of
proposals). For this reason we pad to a max number of proposals per image
within a batch. This `self.max_num_proposals` property is set to the
`first_stage_max_proposals` parameter at inference time and the
`second_stage_batch_size` at training time since we subsample the batch to
be sent through the box classifier during training.
For the second stage of the pipeline, we arrange the proposals for all images
within the batch along a single batch dimension. For example, the input to
_extract_box_classifier_features is a tensor of shape
`[total_num_proposals, crop_height, crop_width, depth]` where
total_num_proposals is batch_size * self.max_num_proposals. (And note that per
the above comment, a subset of these entries correspond to zero paddings.)
+ Coordinate representations:
Following the API (see model.DetectionModel definition), our outputs after
postprocessing operations are always normalized boxes; however, internally, we
sometimes convert to absolute coordinates --- e.g., for loss computation. In
particular, anchors and proposal_boxes are both represented as absolute
coordinates.
Images are resized in the `preprocess` method.
The Faster R-CNN meta architecture has two post-processing methods
`_postprocess_rpn` which is applied after first stage and
`_postprocess_box_classifier` which is applied after second stage. There are
three different ways post-processing can happen depending on number_of_stages
configured in the meta architecture:
1. When number_of_stages is 1:
`_postprocess_rpn` is run as part of the `postprocess` method where
true_image_shapes is used to clip proposals, perform non-max suppression and
normalize them.
2. When number of stages is 2:
`_postprocess_rpn` is run as part of the `_predict_second_stage` method where
`resized_image_shapes` is used to clip proposals, perform non-max suppression
and normalize them. In this case `postprocess` method skips `_postprocess_rpn`
and only runs `_postprocess_box_classifier` using `true_image_shapes` to clip
detections, perform non-max suppression and normalize them.
3. When number of stages is 3:
`_postprocess_rpn` is run as part of the `_predict_second_stage` using
`resized_image_shapes` to clip proposals, perform non-max suppression and
normalize them. Subsequently, `_postprocess_box_classifier` is run as part of
`_predict_third_stage` using `true_image_shapes` to clip detections, perform
non-max suppression and normalize them. In this case, the `postprocess` method
skips both `_postprocess_rpn` and `_postprocess_box_classifier`.
"""
from __future__ import print_function
import abc
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import box_predictor
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
_UNINITIALIZED_FEATURE_EXTRACTOR = '__uninitialized__'
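# Illustrative sketch of the batching convention described in the module
# docstring above: per-image proposal features of shape
# [batch_size, max_num_proposals, crop_height, crop_width, depth] are
# flattened so that the second stage sees a single
# [batch_size * max_num_proposals, crop_height, crop_width, depth] batch.
# This roughly mirrors what FasterRCNNMetaArch does internally; the function
# name here is hypothetical and not part of the public API.
def _toy_flatten_proposals(cropped_proposal_features):
  """Flattens the batch and proposal dimensions (for exposition only)."""
  combined_shape = shape_utils.combined_static_and_dynamic_shape(
      cropped_proposal_features)
  flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +
                             combined_shape[2:])
  return tf.reshape(cropped_proposal_features, flattened_shape)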
class FasterRCNNFeatureExtractor(object):
"""Faster R-CNN Feature Extractor definition."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
first_stage_features_stride: Output stride of extracted RPN feature map.
batch_norm_trainable: Whether to update batch norm parameters during
        training or not. When training with a relatively large batch size
(e.g. 8), it could be desirable to enable batch norm update.
reuse_weights: Whether to reuse variables. Default is None.
weight_decay: float weight decay for feature extractor (default: 0.0).
"""
self._is_training = is_training
self._first_stage_features_stride = first_stage_features_stride
self._train_batch_norm = (batch_norm_trainable and is_training)
self._reuse_weights = tf.AUTO_REUSE if reuse_weights else None
self._weight_decay = weight_decay
@abc.abstractmethod
def preprocess(self, resized_inputs):
"""Feature-extractor specific preprocessing (minus image resizing)."""
pass
def extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
This function is responsible for extracting feature maps from preprocessed
images. These features are used by the region proposal network (RPN) to
predict proposals.
Args:
preprocessed_inputs: A [batch, height, width, channels] float tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
activations: A dictionary mapping activation tensor names to tensors.
"""
with tf.variable_scope(scope, values=[preprocessed_inputs]):
return self._extract_proposal_features(preprocessed_inputs, scope)
@abc.abstractmethod
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features, to be overridden."""
pass
def extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name.
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
with tf.variable_scope(
scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE):
return self._extract_box_classifier_features(proposal_feature_maps, scope)
@abc.abstractmethod
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features, to be overridden."""
pass
def restore_from_classification_checkpoint_fn(
self,
first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope):
"""Returns a map of variables to load from a foreign checkpoint.
Args:
first_stage_feature_extractor_scope: A scope name for the first stage
feature extractor.
second_stage_feature_extractor_scope: A scope name for the second stage
feature extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
variables_to_restore = {}
for variable in variables_helper.get_global_variables_safely():
for scope_name in [first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope]:
if variable.op.name.startswith(scope_name):
var_name = variable.op.name.replace(scope_name + '/', '')
variables_to_restore[var_name] = variable
return variables_to_restore
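# Illustrative-only sketch of the subclassing contract described in the module
# docstring: a concrete feature extractor overrides `preprocess`,
# `_extract_proposal_features` and `_extract_box_classifier_features`. The
# identity "backbone" below is hypothetical and exists purely to show the
# method signatures; it is not one of the extractors shipped with the API.
class _IdentityFasterRCNNFeatureExtractor(FasterRCNNFeatureExtractor):
  """Minimal example extractor (for exposition only)."""
  def preprocess(self, resized_inputs):
    # Real extractors typically scale pixel values, e.g. to [-1, 1].
    return (2.0 / 255.0) * resized_inputs - 1.0
  def _extract_proposal_features(self, preprocessed_inputs, scope):
    # Real extractors run the first part of a backbone (e.g. ResNet conv4)
    # and return the resulting feature map plus an activations dict.
    return preprocessed_inputs, {}
  def _extract_box_classifier_features(self, proposal_feature_maps, scope):
    # Real extractors run the remaining backbone blocks on each cropped
    # proposal feature map.
    return proposal_feature_maps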
class FasterRCNNKerasFeatureExtractor(object):
"""Keras-based Faster R-CNN Feature Extractor definition."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
first_stage_features_stride: Output stride of extracted RPN feature map.
batch_norm_trainable: Whether to update batch norm parameters during
        training or not. When training with a relatively large batch size
(e.g. 8), it could be desirable to enable batch norm update.
weight_decay: float weight decay for feature extractor (default: 0.0).
"""
self._is_training = is_training
self._first_stage_features_stride = first_stage_features_stride
self._train_batch_norm = (batch_norm_trainable and is_training)
self._weight_decay = weight_decay
@abc.abstractmethod
def preprocess(self, resized_inputs):
"""Feature-extractor specific preprocessing (minus image resizing)."""
pass
@abc.abstractmethod
def get_proposal_feature_extractor_model(self, name):
"""Get model that extracts first stage RPN features, to be overridden."""
pass
@abc.abstractmethod
def get_box_classifier_feature_extractor_model(self, name):
"""Get model that extracts second stage box classifier features."""
pass
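# Companion sketch to the class above, again for exposition only: a Keras
# extractor returns tf.keras.Model objects for the two stages instead of
# overriding _extract_* methods. The identity models below are hypothetical
# placeholders for a real backbone.
class _IdentityKerasFeatureExtractor(FasterRCNNKerasFeatureExtractor):
  """Minimal example Keras extractor (for exposition only)."""
  def preprocess(self, resized_inputs):
    return (2.0 / 255.0) * resized_inputs - 1.0
  def get_proposal_feature_extractor_model(self, name):
    return tf.keras.Sequential(
        [tf.keras.layers.Lambda(tf.identity)], name=name)
  def get_box_classifier_feature_extractor_model(self, name):
    return tf.keras.Sequential(
        [tf.keras.layers.Lambda(tf.identity)], name=name)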
class FasterRCNNMetaArch(model.DetectionModel):
"""Faster R-CNN Meta-architecture definition."""
def __init__(self,
is_training,
num_classes,
image_resizer_fn,
feature_extractor,
number_of_stages,
first_stage_anchor_generator,
first_stage_target_assigner,
first_stage_atrous_rate,
first_stage_box_predictor_arg_scope_fn,
first_stage_box_predictor_kernel_size,
first_stage_box_predictor_depth,
first_stage_minibatch_size,
first_stage_sampler,
first_stage_non_max_suppression_fn,
first_stage_max_proposals,
first_stage_localization_loss_weight,
first_stage_objectness_loss_weight,
crop_and_resize_fn,
initial_crop_size,
maxpool_kernel_size,
maxpool_stride,
second_stage_target_assigner,
second_stage_mask_rcnn_box_predictor,
second_stage_batch_size,
second_stage_sampler,
second_stage_non_max_suppression_fn,
second_stage_score_conversion_fn,
second_stage_localization_loss_weight,
second_stage_classification_loss_weight,
second_stage_classification_loss,
second_stage_mask_prediction_loss_weight=1.0,
hard_example_miner=None,
parallel_iterations=16,
add_summaries=True,
clip_anchors_to_image=False,
use_static_shapes=False,
resize_masks=True,
freeze_batchnorm=False,
return_raw_detections_during_predict=False,
output_final_box_features=False,
output_final_box_rpn_features=False):
"""FasterRCNNMetaArch Constructor.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
num_classes: Number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
image_resizer_fn: A callable for image resizing. This callable
takes a rank-3 image tensor of shape [height, width, channels]
(corresponding to a single image), an optional rank-3 instance mask
tensor of shape [num_masks, height, width] and returns a resized rank-3
image tensor, a resized mask tensor if one was provided in the input. In
addition this callable must also return a 1-D tensor of the form
[height, width, channels] containing the size of the true image, as the
image resizer can perform zero padding. See protos/image_resizer.proto.
feature_extractor: A FasterRCNNFeatureExtractor object.
      number_of_stages: An integer taking values in {1, 2, 3}. If
1, the function will construct only the Region Proposal Network (RPN)
part of the model. If 2, the function will perform box refinement and
other auxiliary predictions all in the second stage. If 3, it will
extract features from refined boxes and perform the auxiliary
predictions on the non-maximum suppressed refined boxes.
If is_training is true and the value of number_of_stages is 3, it is
reduced to 2 since all the model heads are trained in parallel in second
stage during training.
first_stage_anchor_generator: An anchor_generator.AnchorGenerator object
(note that currently we only support
grid_anchor_generator.GridAnchorGenerator objects)
first_stage_target_assigner: Target assigner to use for first stage of
Faster R-CNN (RPN).
first_stage_atrous_rate: A single integer indicating the atrous rate for
the single convolution op which is applied to the `rpn_features_to_crop`
tensor to obtain a tensor to be used for box prediction. Some feature
extractors optionally allow for producing feature maps computed at
denser resolutions. The atrous rate is used to compensate for the
denser feature maps by using an effectively larger receptive field.
(This should typically be set to 1).
first_stage_box_predictor_arg_scope_fn: Either a
Keras layer hyperparams object or a function to construct tf-slim
arg_scope for conv2d, separable_conv2d and fully_connected ops. Used
for the RPN box predictor. If it is a keras hyperparams object the
RPN box predictor will be a Keras model. If it is a function to
construct an arg scope it will be a tf-slim box predictor.
first_stage_box_predictor_kernel_size: Kernel size to use for the
convolution op just prior to RPN box predictions.
first_stage_box_predictor_depth: Output depth for the convolution op
just prior to RPN box predictions.
first_stage_minibatch_size: The "batch size" to use for computing the
objectness and location loss of the region proposal network. This
"batch size" refers to the number of anchors selected as contributing
to the loss function for any given image within the image batch and is
only called "batch_size" due to terminology from the Faster R-CNN paper.
first_stage_sampler: Sampler to use for first stage loss (RPN loss).
first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
callable that takes `boxes`, `scores` and optional `clip_window`(with
all other inputs already set) and returns a dictionary containing
tensors with keys: `detection_boxes`, `detection_scores`,
`detection_classes`, `num_detections`. This is used to perform non max
suppression on the boxes predicted by the Region Proposal Network
(RPN).
See `post_processing.batch_multiclass_non_max_suppression` for the type
and shape of these tensors.
first_stage_max_proposals: Maximum number of boxes to retain after
performing Non-Max Suppression (NMS) on the boxes predicted by the
Region Proposal Network (RPN).
first_stage_localization_loss_weight: A float
first_stage_objectness_loss_weight: A float
crop_and_resize_fn: A differentiable resampler to use for cropping RPN
proposal features.
initial_crop_size: A single integer indicating the output size
(width and height are set to be the same) of the initial bilinear
interpolation based cropping during ROI pooling.
maxpool_kernel_size: A single integer indicating the kernel size of the
max pool op on the cropped feature map during ROI pooling.
maxpool_stride: A single integer indicating the stride of the max pool
op on the cropped feature map during ROI pooling.
second_stage_target_assigner: Target assigner to use for second stage of
Faster R-CNN. If the model is configured with multiple prediction heads,
this target assigner is used to generate targets for all heads (with the
correct `unmatched_class_label`).
second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for
the second stage.
second_stage_batch_size: The batch size used for computing the
classification and refined location loss of the box classifier. This
"batch size" refers to the number of proposals selected as contributing
to the loss function for any given image within the image batch and is
only called "batch_size" due to terminology from the Faster R-CNN paper.
second_stage_sampler: Sampler to use for second stage loss (box
classifier loss).
second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
callable that takes `boxes`, `scores`, optional `clip_window` and
optional (kwarg) `mask` inputs (with all other inputs already set)
and returns a dictionary containing tensors with keys:
`detection_boxes`, `detection_scores`, `detection_classes`,
`num_detections`, and (optionally) `detection_masks`. See
`post_processing.batch_multiclass_non_max_suppression` for the type and
shape of these tensors.
second_stage_score_conversion_fn: Callable elementwise nonlinearity
(that takes tensors as inputs and returns tensors). This is usually
used to convert logits to probabilities.
second_stage_localization_loss_weight: A float indicating the scale factor
for second stage localization loss.
second_stage_classification_loss_weight: A float indicating the scale
factor for second stage classification loss.
second_stage_classification_loss: Classification loss used by the second
stage classifier. Either losses.WeightedSigmoidClassificationLoss or
losses.WeightedSoftmaxClassificationLoss.
second_stage_mask_prediction_loss_weight: A float indicating the scale
factor for second stage mask prediction loss. This is applicable only if
second stage box predictor is configured to predict masks.
hard_example_miner: A losses.HardExampleMiner object (can be None).
parallel_iterations: (Optional) The number of iterations allowed to run
in parallel for calls to tf.map_fn.
add_summaries: boolean (default: True) controlling whether summary ops
should be added to tensorflow graph.
clip_anchors_to_image: Normally, anchors generated for a given image size
are pruned during training if they lie outside the image window. This
option clips the anchors to be within the image instead of pruning.
use_static_shapes: If True, uses implementation of ops with static shape
guarantees.
      resize_masks: Indicates whether the masks present in the groundtruth
        should be resized in the model with `image_resizer_fn`.
freeze_batchnorm: Whether to freeze batch norm parameters in the first
stage box predictor during training or not. When training with a small
batch size (e.g. 1), it is desirable to freeze batch norm update and
use pretrained batch norm params.
return_raw_detections_during_predict: Whether to return raw detection
boxes in the predict() method. These are decoded boxes that have not
been through postprocessing (i.e. NMS). Default False.
      output_final_box_features: Whether to output final box features. If true,
        it crops the rpn feature map, passes it through the box classifier and
        returns it in the output dict as `detection_features`.
      output_final_box_rpn_features: Whether to output rpn box features. If
        true, it crops the rpn feature map and returns it in the output dict as
        `detection_features`.
Raises:
ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at
training time.
ValueError: If first_stage_anchor_generator is not of type
grid_anchor_generator.GridAnchorGenerator.
"""
# TODO(rathodv): add_summaries is currently unused. Respect that directive
# in the future.
super(FasterRCNNMetaArch, self).__init__(num_classes=num_classes)
self._is_training = is_training
self._image_resizer_fn = image_resizer_fn
self._resize_masks = resize_masks
self._feature_extractor = feature_extractor
if isinstance(feature_extractor, FasterRCNNKerasFeatureExtractor):
# We delay building the feature extractor until it is used,
# to avoid creating the variables when a model is built just for data
# preprocessing. (This prevents a subtle bug where variable names are
# mismatched across workers, causing only one worker to be able to train)
self._feature_extractor_for_proposal_features = (
_UNINITIALIZED_FEATURE_EXTRACTOR)
self._feature_extractor_for_box_classifier_features = (
_UNINITIALIZED_FEATURE_EXTRACTOR)
else:
self._feature_extractor_for_proposal_features = None
self._feature_extractor_for_box_classifier_features = None
self._number_of_stages = number_of_stages
self._proposal_target_assigner = first_stage_target_assigner
self._detector_target_assigner = second_stage_target_assigner
# Both proposal and detector target assigners use the same box coder
self._box_coder = self._proposal_target_assigner.box_coder
# (First stage) Region proposal network parameters
self._first_stage_anchor_generator = first_stage_anchor_generator
self._first_stage_atrous_rate = first_stage_atrous_rate
self._first_stage_box_predictor_depth = first_stage_box_predictor_depth
self._first_stage_box_predictor_kernel_size = (
first_stage_box_predictor_kernel_size)
self._first_stage_minibatch_size = first_stage_minibatch_size
self._first_stage_sampler = first_stage_sampler
if isinstance(first_stage_box_predictor_arg_scope_fn,
hyperparams_builder.KerasLayerHyperparams):
num_anchors_per_location = (
self._first_stage_anchor_generator.num_anchors_per_location())
conv_hyperparams = (
first_stage_box_predictor_arg_scope_fn)
self._first_stage_box_predictor_first_conv = (
tf.keras.Sequential([
tf.keras.layers.Conv2D(
self._first_stage_box_predictor_depth,
kernel_size=[self._first_stage_box_predictor_kernel_size,
self._first_stage_box_predictor_kernel_size],
dilation_rate=self._first_stage_atrous_rate,
padding='SAME',
name='RPNConv',
**conv_hyperparams.params()),
conv_hyperparams.build_batch_norm(
(self._is_training and not freeze_batchnorm),
name='RPNBatchNorm'),
tf.keras.layers.Lambda(
tf.nn.relu6,
name='RPNActivation')
], name='FirstStageRPNFeatures'))
self._first_stage_box_predictor = (
box_predictor_builder.build_convolutional_keras_box_predictor(
is_training=self._is_training,
num_classes=1,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=False,
num_predictions_per_location_list=num_anchors_per_location,
use_dropout=False,
dropout_keep_prob=1.0,
box_code_size=self._box_coder.code_size,
kernel_size=1,
num_layers_before_predictor=0,
min_depth=0,
max_depth=0,
name=self.first_stage_box_predictor_scope))
else:
self._first_stage_box_predictor_arg_scope_fn = (
first_stage_box_predictor_arg_scope_fn)
def rpn_box_predictor_feature_extractor(single_rpn_features_to_crop):
with slim.arg_scope(self._first_stage_box_predictor_arg_scope_fn()):
return slim.conv2d(
single_rpn_features_to_crop,
self._first_stage_box_predictor_depth,
kernel_size=[
self._first_stage_box_predictor_kernel_size,
self._first_stage_box_predictor_kernel_size
],
rate=self._first_stage_atrous_rate,
activation_fn=tf.nn.relu6,
scope='Conv',
reuse=tf.AUTO_REUSE)
self._first_stage_box_predictor_first_conv = (
rpn_box_predictor_feature_extractor)
self._first_stage_box_predictor = (
box_predictor_builder.build_convolutional_box_predictor(
is_training=self._is_training,
num_classes=1,
conv_hyperparams_fn=self._first_stage_box_predictor_arg_scope_fn,
use_dropout=False,
dropout_keep_prob=1.0,
box_code_size=self._box_coder.code_size,
kernel_size=1,
num_layers_before_predictor=0,
min_depth=0,
max_depth=0))
self._first_stage_nms_fn = first_stage_non_max_suppression_fn
self._first_stage_max_proposals = first_stage_max_proposals
self._use_static_shapes = use_static_shapes
self._first_stage_localization_loss = (
losses.WeightedSmoothL1LocalizationLoss())
self._first_stage_objectness_loss = (
losses.WeightedSoftmaxClassificationLoss())
self._first_stage_loc_loss_weight = first_stage_localization_loss_weight
self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight
# Per-region cropping parameters
self._crop_and_resize_fn = crop_and_resize_fn
self._initial_crop_size = initial_crop_size
self._maxpool_kernel_size = maxpool_kernel_size
self._maxpool_stride = maxpool_stride
# If max pooling is to be used, build the layer
if maxpool_kernel_size:
self._maxpool_layer = tf.keras.layers.MaxPooling2D(
[self._maxpool_kernel_size, self._maxpool_kernel_size],
strides=self._maxpool_stride,
name='MaxPool2D')
self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor
self._second_stage_batch_size = second_stage_batch_size
self._second_stage_sampler = second_stage_sampler
self._second_stage_nms_fn = second_stage_non_max_suppression_fn
self._second_stage_score_conversion_fn = second_stage_score_conversion_fn
self._second_stage_localization_loss = (
losses.WeightedSmoothL1LocalizationLoss())
self._second_stage_classification_loss = second_stage_classification_loss
self._second_stage_mask_loss = (
losses.WeightedSigmoidClassificationLoss())
self._second_stage_loc_loss_weight = second_stage_localization_loss_weight
self._second_stage_cls_loss_weight = second_stage_classification_loss_weight
self._second_stage_mask_loss_weight = (
second_stage_mask_prediction_loss_weight)
self._hard_example_miner = hard_example_miner
self._parallel_iterations = parallel_iterations
self.clip_anchors_to_image = clip_anchors_to_image
if self._number_of_stages <= 0 or self._number_of_stages > 3:
raise ValueError('Number of stages should be a value in {1, 2, 3}.')
self._batched_prediction_tensor_names = []
self._return_raw_detections_during_predict = (
return_raw_detections_during_predict)
self._output_final_box_features = output_final_box_features
self._output_final_box_rpn_features = output_final_box_rpn_features
@property
def first_stage_feature_extractor_scope(self):
return 'FirstStageFeatureExtractor'
@property
def second_stage_feature_extractor_scope(self):
return 'SecondStageFeatureExtractor'
@property
def first_stage_box_predictor_scope(self):
return 'FirstStageBoxPredictor'
@property
def second_stage_box_predictor_scope(self):
return 'SecondStageBoxPredictor'
@property
def max_num_proposals(self):
"""Max number of proposals (to pad to) for each image in the input batch.
At training time, this is set to be the `second_stage_batch_size` if hard
example miner is not configured, else it is set to
`first_stage_max_proposals`. At inference time, this is always set to
`first_stage_max_proposals`.
Returns:
A positive integer.
"""
if self._is_training and not self._hard_example_miner:
return self._second_stage_batch_size
return self._first_stage_max_proposals
@property
def anchors(self):
if not self._anchors:
raise RuntimeError('anchors have not been constructed yet!')
if not isinstance(self._anchors, box_list.BoxList):
raise RuntimeError('anchors should be a BoxList object, but is not.')
return self._anchors
@property
def batched_prediction_tensor_names(self):
if not self._batched_prediction_tensor_names:
raise RuntimeError('Must call predict() method to get batched prediction '
'tensor names.')
return self._batched_prediction_tensor_names
@property
def feature_extractor(self):
return self._feature_extractor
def preprocess(self, inputs):
"""Feature-extractor specific preprocessing.
See base class.
For Faster R-CNN, we perform image resizing in the base class --- each
class subclassing FasterRCNNMetaArch is responsible for any additional
preprocessing (e.g., scaling pixel values to be in [-1, 1]).
Args:
inputs: a [batch, height_in, width_in, channels] float tensor representing
a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float
tensor representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Raises:
ValueError: if inputs tensor does not have type tf.float32
"""
with tf.name_scope('Preprocessor'):
(resized_inputs,
true_image_shapes) = shape_utils.resize_images_and_return_shapes(
inputs, self._image_resizer_fn)
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
def _compute_clip_window(self, image_shapes):
"""Computes clip window for non max suppression based on image shapes.
This function assumes that the clip window's left top corner is at (0, 0).
Args:
image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing
shapes of images in the batch. Each row represents [height, width,
channels] of an image.
Returns:
A 2-D float32 tensor of shape [batch_size, 4] containing the clip window
for each image in the form [ymin, xmin, ymax, xmax].
"""
clip_heights = image_shapes[:, 0]
clip_widths = image_shapes[:, 1]
clip_window = tf.cast(
tf.stack([
tf.zeros_like(clip_heights),
tf.zeros_like(clip_heights), clip_heights, clip_widths
],
axis=1),
dtype=tf.float32)
return clip_window
def _proposal_postprocess(self, rpn_box_encodings,
rpn_objectness_predictions_with_background, anchors,
image_shape, true_image_shapes):
"""Wraps over FasterRCNNMetaArch._postprocess_rpn()."""
image_shape_2d = self._image_batch_shape_2d(image_shape)
proposal_boxes_normalized, _, _, num_proposals, _, _ = \
self._postprocess_rpn(
rpn_box_encodings, rpn_objectness_predictions_with_background,
anchors, image_shape_2d, true_image_shapes)
return proposal_boxes_normalized, num_proposals
def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs):
"""Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the
forward pass of the network to yield "raw" un-postprocessed predictions.
If `number_of_stages` is 1, this function only returns first stage
RPN predictions (un-postprocessed). Otherwise it returns both
first stage RPN predictions as well as second stage box classifier
predictions.
Other remarks:
+ Anchor pruning vs. clipping: following the recommendation of the Faster
R-CNN paper, we prune anchors that venture outside the image window at
training time and clip anchors to the image window at inference time.
+ Proposal padding: as described at the top of the file, proposals are
padded to self._max_num_proposals and flattened so that proposals from all
images within the input batch are arranged along the same batch dimension.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**side_inputs: additional tensors that are required by the network.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) rpn_box_predictor_features: A list of 4-D float32 tensor with shape
[batch_size, height_i, width_j, depth] to be used for predicting
proposal boxes and corresponding objectness scores.
2) rpn_features_to_crop: A list of 4-D float32 tensor with shape
[batch_size, height, width, depth] representing image features to crop
using the proposal boxes predicted by the RPN.
3) image_shape: a 1-D tensor of shape [4] representing the input
image shape.
4) rpn_box_encodings: 3-D float tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted boxes.
5) rpn_objectness_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
for the first stage RPN (in absolute coordinates). Note that
`num_anchors` can differ depending on whether the model is created in
training or inference mode.
7) feature_maps: A single element list containing a 4-D float32 tensor
          with shape [batch_size, height, width, depth] representing the RPN
features to crop.
(and if number_of_stages > 1):
8) refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using
a shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
9) class_predictions_with_background: a 3-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
10) num_proposals: An int32 tensor of shape [batch_size] representing
the number of proposals generated by the RPN. `num_proposals` allows
us to keep track of which entries are to be treated as zero paddings
and which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
11) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
12) mask_predictions: (optional) a 4-D tensor with shape
[total_num_padded_proposals, num_classes, mask_height, mask_width]
containing instance mask predictions.
13) raw_detection_boxes: (optional) a
[batch_size, self.max_num_proposals, num_classes, 4] float32 tensor
with detections prior to NMS in normalized coordinates.
14) raw_detection_feature_map_indices: (optional) a
[batch_size, self.max_num_proposals, num_classes] int32 tensor with
indices indicating which feature map each raw detection box was
produced from. The indices correspond to the elements in the
'feature_maps' field.
Raises:
ValueError: If `predict` is called before `preprocess`.
"""
prediction_dict = self._predict_first_stage(preprocessed_inputs)
if self._number_of_stages >= 2:
prediction_dict.update(
self._predict_second_stage(
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'],
prediction_dict['rpn_features_to_crop'],
prediction_dict['anchors'], prediction_dict['image_shape'],
true_image_shapes,
**side_inputs))
if self._number_of_stages == 3:
prediction_dict = self._predict_third_stage(prediction_dict,
true_image_shapes)
self._batched_prediction_tensor_names = [
x for x in prediction_dict if x not in ('image_shape', 'anchors')
]
return prediction_dict
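  # Hedged usage sketch (illustrative only): callers are expected to chain
  # preprocess -> predict -> postprocess. `detection_model` below stands for
  # an already-built FasterRCNNMetaArch instance and `images` for any image
  # batch; both names are placeholders, not part of this module.
  #
  #   preprocessed, true_shapes = detection_model.preprocess(images)
  #   prediction_dict = detection_model.predict(preprocessed, true_shapes)
  #   detections = detection_model.postprocess(prediction_dict, true_shapes)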
def _predict_first_stage(self, preprocessed_inputs):
"""First stage of prediction.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) rpn_box_predictor_features: A list of 4-D float32/bfloat16 tensor
with shape [batch_size, height_i, width_j, depth] to be used for
predicting proposal boxes and corresponding objectness scores.
2) rpn_features_to_crop: A list of 4-D float32/bfloat16 tensor with
shape [batch_size, height, width, depth] representing image features
to crop using the proposal boxes predicted by the RPN.
3) image_shape: a 1-D tensor of shape [4] representing the input
image shape.
4) rpn_box_encodings: 3-D float32 tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted boxes.
5) rpn_objectness_predictions_with_background: 3-D float32 tensor of
shape [batch_size, num_anchors, 2] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions (at class index 0).
6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
for the first stage RPN (in absolute coordinates). Note that
`num_anchors` can differ depending on whether the model is created in
training or inference mode.
7) feature_maps: A single element list containing a 4-D float32 tensor
with shape [batch_size, height, width, depth] representing the RPN
features to crop.
"""
(rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist,
image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs)
(rpn_box_encodings, rpn_objectness_predictions_with_background
) = self._predict_rpn_proposals(rpn_box_predictor_features)
# The Faster R-CNN paper recommends pruning anchors that venture outside
# the image window at training time and clipping at inference time.
clip_window = tf.cast(tf.stack([0, 0, image_shape[1], image_shape[2]]),
dtype=tf.float32)
if self._is_training:
if self.clip_anchors_to_image:
anchors_boxlist = box_list_ops.clip_to_window(
anchors_boxlist, clip_window, filter_nonoverlapping=False)
else:
(rpn_box_encodings, rpn_objectness_predictions_with_background,
anchors_boxlist) = self._remove_invalid_anchors_and_predictions(
rpn_box_encodings, rpn_objectness_predictions_with_background,
anchors_boxlist, clip_window)
else:
anchors_boxlist = box_list_ops.clip_to_window(
anchors_boxlist, clip_window,
filter_nonoverlapping=not self._use_static_shapes)
self._anchors = anchors_boxlist
prediction_dict = {
'rpn_box_predictor_features':
rpn_box_predictor_features,
'rpn_features_to_crop':
rpn_features_to_crop,
'image_shape':
image_shape,
'rpn_box_encodings':
tf.cast(rpn_box_encodings, dtype=tf.float32),
'rpn_objectness_predictions_with_background':
tf.cast(rpn_objectness_predictions_with_background,
dtype=tf.float32),
'anchors':
anchors_boxlist.data['boxes'],
fields.PredictionFields.feature_maps: rpn_features_to_crop
}
return prediction_dict
def _image_batch_shape_2d(self, image_batch_shape_1d):
"""Takes a 1-D image batch shape tensor and converts it to a 2-D tensor.
Example:
If the 1-D image batch shape tensor is [2, 300, 300, 3], the corresponding
2-D image batch shape tensor would be [[300, 300, 3], [300, 300, 3]].
Args:
image_batch_shape_1d: 1-D tensor of the form [batch_size, height,
width, channels].
Returns:
image_batch_shape_2d: 2-D tensor of shape [batch_size, 3] where each row is
of the form [height, width, channels].
"""
return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0),
[image_batch_shape_1d[0], 1])
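  # Worked example (sketch): with image_batch_shape_1d = [2, 300, 300, 3],
  # the tile above yields [[300, 300, 3], [300, 300, 3]]:
  #
  #   shape_1d = tf.constant([2, 300, 300, 3])
  #   shape_2d = tf.tile(tf.expand_dims(shape_1d[1:], 0), [shape_1d[0], 1])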
def _predict_second_stage(self, rpn_box_encodings,
rpn_objectness_predictions_with_background,
rpn_features_to_crop, anchors, image_shape,
true_image_shapes, **side_inputs):
"""Predicts the output tensors from second stage of Faster R-CNN.
Args:
rpn_box_encodings: 3-D float tensor of shape
[batch_size, num_valid_anchors, self._box_coder.code_size] containing
predicted boxes.
rpn_objectness_predictions_with_background: 3-D float tensor of shape
[batch_size, num_valid_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
rpn_features_to_crop: A list of 4-D float32 or bfloat16 tensor with shape
[batch_size, height_i, width_i, depth] representing image features to
crop using the proposal boxes predicted by the RPN.
anchors: 2-D float tensor of shape
[num_anchors, self._box_coder.code_size].
image_shape: A 1-D int32 tensor of size [4] containing the image shape.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**side_inputs: additional tensors that are required by the network.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D float32 tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using a
shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
2) class_predictions_with_background: a 3-D float32 tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) num_proposals: An int32 tensor of shape [batch_size] representing the
number of proposals generated by the RPN. `num_proposals` allows us
to keep track of which entries are to be treated as zero paddings and
which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
4) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
5) proposal_boxes_normalized: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing decoded proposal
bounding boxes in normalized coordinates. Can be used to override the
boxes proposed by the RPN, thus enabling one to extract features and
get box classification and prediction for externally selected areas
of the image.
6) box_classifier_features: a 4-D float32/bfloat16 tensor
representing the features for each proposal.
If self._return_raw_detections_during_predict is True, the dictionary
will also contain:
7) raw_detection_boxes: a 4-D float32 tensor with shape
[batch_size, self.max_num_proposals, num_classes, 4] in normalized
coordinates.
8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape
[batch_size, self.max_num_proposals, num_classes].
"""
proposal_boxes_normalized, num_proposals = self._proposal_postprocess(
rpn_box_encodings, rpn_objectness_predictions_with_background, anchors,
image_shape, true_image_shapes)
prediction_dict = self._box_prediction(rpn_features_to_crop,
proposal_boxes_normalized,
image_shape, true_image_shapes,
**side_inputs)
prediction_dict['num_proposals'] = num_proposals
return prediction_dict
def _box_prediction(self, rpn_features_to_crop, proposal_boxes_normalized,
image_shape, true_image_shapes, **side_inputs):
"""Predicts the output tensors from second stage of Faster R-CNN.
Args:
rpn_features_to_crop: A list of 4-D float32 or bfloat16 tensors with shape
[batch_size, height_i, width_i, depth] representing image features to
crop using the proposal boxes predicted by the RPN.
proposal_boxes_normalized: A float tensor with shape [batch_size,
max_num_proposals, 4] representing the (potentially zero padded)
proposal boxes for all images in the batch. These boxes are represented
as normalized coordinates.
image_shape: A 1-D int32 tensor of size [4] containing the image shape.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**side_inputs: additional tensors that are required by the network.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D float32 tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using a
shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
2) class_predictions_with_background: a 3-D float32 tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
4) proposal_boxes_normalized: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing decoded proposal
bounding boxes in normalized coordinates. Can be used to override the
boxes proposed by the RPN, thus enabling one to extract features and
get box classification and prediction for externally selected areas
of the image.
5) box_classifier_features: a 4-D float32/bfloat16 tensor
representing the features for each proposal.
If self._return_raw_detections_during_predict is True, the dictionary
will also contain:
6) raw_detection_boxes: a 4-D float32 tensor with shape
[batch_size, self.max_num_proposals, num_classes, 4] in normalized
coordinates.
7) raw_detection_feature_map_indices: a 3-D int32 tensor with shape
[batch_size, self.max_num_proposals, num_classes].
8) final_anchors: a 3-D float tensor of shape [batch_size,
self.max_num_proposals, 4] containing the reference anchors for raw
detection boxes in normalized coordinates.
"""
flattened_proposal_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, proposal_boxes_normalized,
image_shape, **side_inputs))
box_classifier_features = self._extract_box_classifier_features(
flattened_proposal_feature_maps, **side_inputs)
if self._mask_rcnn_box_predictor.is_keras_model:
box_predictions = self._mask_rcnn_box_predictor(
[box_classifier_features],
prediction_stage=2)
else:
box_predictions = self._mask_rcnn_box_predictor.predict(
[box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=2)
refined_box_encodings = tf.squeeze(
box_predictions[box_predictor.BOX_ENCODINGS],
axis=1, name='all_refined_box_encodings')
class_predictions_with_background = tf.squeeze(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1, name='all_class_predictions_with_background')
absolute_proposal_boxes = ops.normalized_to_image_coordinates(
proposal_boxes_normalized, image_shape, self._parallel_iterations)
prediction_dict = {
'refined_box_encodings': tf.cast(refined_box_encodings,
dtype=tf.float32),
'class_predictions_with_background':
tf.cast(class_predictions_with_background, dtype=tf.float32),
'proposal_boxes': absolute_proposal_boxes,
'box_classifier_features': box_classifier_features,
'proposal_boxes_normalized': proposal_boxes_normalized,
'final_anchors': proposal_boxes_normalized
}
if self._return_raw_detections_during_predict:
prediction_dict.update(self._raw_detections_and_feature_map_inds(
refined_box_encodings, absolute_proposal_boxes, true_image_shapes))
return prediction_dict
def _raw_detections_and_feature_map_inds(
self, refined_box_encodings, absolute_proposal_boxes, true_image_shapes):
"""Returns raw detections and feat map inds from where they originated.
Args:
refined_box_encodings: [total_num_proposals, num_classes,
self._box_coder.code_size] float32 tensor.
absolute_proposal_boxes: [batch_size, self.max_num_proposals, 4] float32
tensor representing decoded proposal bounding boxes in absolute
coordinates.
true_image_shapes: [batch, 3] int32 tensor where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
A dictionary with raw detection boxes, and the feature map indices from
which they originated.
"""
box_encodings_batch = tf.reshape(
refined_box_encodings,
[-1, self.max_num_proposals, refined_box_encodings.shape[1],
self._box_coder.code_size])
raw_detection_boxes_absolute = self._batch_decode_boxes(
box_encodings_batch, absolute_proposal_boxes)
raw_detection_boxes_normalized = shape_utils.static_or_dynamic_map_fn(
self._normalize_and_clip_boxes,
elems=[raw_detection_boxes_absolute, true_image_shapes],
dtype=tf.float32)
detection_feature_map_indices = tf.zeros_like(
raw_detection_boxes_normalized[:, :, :, 0], dtype=tf.int32)
return {
fields.PredictionFields.raw_detection_boxes:
raw_detection_boxes_normalized,
fields.PredictionFields.raw_detection_feature_map_indices:
detection_feature_map_indices
}
def _extract_box_classifier_features(self, flattened_feature_maps):
if self._feature_extractor_for_box_classifier_features == (
_UNINITIALIZED_FEATURE_EXTRACTOR):
self._feature_extractor_for_box_classifier_features = (
self._feature_extractor.get_box_classifier_feature_extractor_model(
name=self.second_stage_feature_extractor_scope))
if self._feature_extractor_for_box_classifier_features:
box_classifier_features = (
self._feature_extractor_for_box_classifier_features(
flattened_feature_maps))
else:
box_classifier_features = (
self._feature_extractor.extract_box_classifier_features(
flattened_feature_maps,
scope=self.second_stage_feature_extractor_scope))
return box_classifier_features
def _predict_third_stage(self, prediction_dict, image_shapes):
"""Predicts non-box, non-class outputs using refined detections.
For training, masks are predicted directly on the box_classifier_features,
which are region-features from the initial anchor boxes.
For inference, this happens after calling the post-processing stage, such
that masks are only calculated for the top scored boxes.
Args:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using a
shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
2) class_predictions_with_background: a 3-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) num_proposals: An int32 tensor of shape [batch_size] representing the
number of proposals generated by the RPN. `num_proposals` allows us
to keep track of which entries are to be treated as zero paddings and
which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
4) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
5) box_classifier_features: a 4-D float32 tensor representing the
features for each proposal.
6) image_shape: a 1-D tensor of shape [4] representing the input
image shape.
image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing
shapes of images in the batch.
Returns:
prediction_dict: a dictionary that, in addition to the input predictions,
also holds the following predictions:
1) mask_predictions: a 4-D tensor with shape
[batch_size, max_detection, mask_height, mask_width] containing
instance mask predictions.
"""
if self._is_training:
curr_box_classifier_features = prediction_dict['box_classifier_features']
detection_classes = prediction_dict['class_predictions_with_background']
if self._mask_rcnn_box_predictor.is_keras_model:
mask_predictions = self._mask_rcnn_box_predictor(
[curr_box_classifier_features],
prediction_stage=3)
else:
mask_predictions = self._mask_rcnn_box_predictor.predict(
[curr_box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=3)
prediction_dict['mask_predictions'] = tf.squeeze(mask_predictions[
box_predictor.MASK_PREDICTIONS], axis=1)
else:
detections_dict = self._postprocess_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'],
image_shapes)
prediction_dict.update(detections_dict)
detection_boxes = detections_dict[
fields.DetectionResultFields.detection_boxes]
detection_classes = detections_dict[
fields.DetectionResultFields.detection_classes]
rpn_features_to_crop = prediction_dict['rpn_features_to_crop']
image_shape = prediction_dict['image_shape']
batch_size = tf.shape(detection_boxes)[0]
max_detection = tf.shape(detection_boxes)[1]
flattened_detected_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, detection_boxes, image_shape))
curr_box_classifier_features = self._extract_box_classifier_features(
flattened_detected_feature_maps)
if self._mask_rcnn_box_predictor.is_keras_model:
mask_predictions = self._mask_rcnn_box_predictor(
[curr_box_classifier_features],
prediction_stage=3)
else:
mask_predictions = self._mask_rcnn_box_predictor.predict(
[curr_box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=3)
detection_masks = tf.squeeze(mask_predictions[
box_predictor.MASK_PREDICTIONS], axis=1)
_, num_classes, mask_height, mask_width = (
detection_masks.get_shape().as_list())
_, max_detection = detection_classes.get_shape().as_list()
prediction_dict['mask_predictions'] = tf.reshape(
detection_masks, [-1, num_classes, mask_height, mask_width])
if num_classes > 1:
detection_masks = self._gather_instance_masks(
detection_masks, detection_classes)
detection_masks = tf.cast(detection_masks, tf.float32)
prediction_dict[fields.DetectionResultFields.detection_masks] = (
tf.reshape(tf.sigmoid(detection_masks),
[batch_size, max_detection, mask_height, mask_width]))
return prediction_dict
def _gather_instance_masks(self, instance_masks, classes):
"""Gathers the masks that correspond to classes.
Args:
instance_masks: A 4-D float32 tensor with shape
[K, num_classes, mask_height, mask_width].
classes: A 2-D int32 tensor with shape [batch_size, max_detection].
Returns:
masks: a 3-D float32 tensor with shape [K, mask_height, mask_width].
"""
_, num_classes, height, width = instance_masks.get_shape().as_list()
k = tf.shape(instance_masks)[0]
instance_masks = tf.reshape(instance_masks, [-1, height, width])
classes = tf.cast(tf.reshape(classes, [-1]), dtype=tf.int32)
gather_idx = tf.range(k) * num_classes + classes
return tf.gather(instance_masks, gather_idx)
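  # Worked example (illustrative): with K=2 detections, num_classes=3 and
  # classes=[[1, 2]], the masks are first reshaped to
  # [K * num_classes, height, width] and the flat gather indices are
  # range(2) * 3 + [1, 2] = [1, 5], i.e. the class-1 mask of the first
  # detection and the class-2 mask of the second.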
def _extract_rpn_feature_maps(self, preprocessed_inputs):
"""Extracts RPN features.
This function extracts two feature maps: a feature map to be directly
fed to a box predictor (to predict location and objectness scores for
proposals) and a feature map from which to crop regions which will then
be sent to the second stage box classifier.
Args:
preprocessed_inputs: a [batch, height, width, channels] image tensor.
Returns:
rpn_box_predictor_features: A list of 4-D float32 tensor with shape
[batch, height_i, width_j, depth] to be used for predicting proposal
boxes and corresponding objectness scores.
rpn_features_to_crop: A list of 4-D float32 tensor with shape
[batch, height, width, depth] representing image features to crop using
the proposal boxes.
anchors: A list of BoxList representing anchors (for the RPN) in
absolute coordinates.
image_shape: A 1-D tensor representing the input image shape.
"""
image_shape = tf.shape(preprocessed_inputs)
rpn_features_to_crop, self.endpoints = self._extract_proposal_features(
preprocessed_inputs)
# If rpn_features_to_crop is not already a list, wrap it in a
# single-element list.
if not isinstance(rpn_features_to_crop, list):
rpn_features_to_crop = [rpn_features_to_crop]
feature_map_shapes = []
rpn_box_predictor_features = []
for single_rpn_features_to_crop in rpn_features_to_crop:
single_shape = tf.shape(single_rpn_features_to_crop)
feature_map_shapes.append((single_shape[1], single_shape[2]))
single_rpn_box_predictor_features = (
self._first_stage_box_predictor_first_conv(
single_rpn_features_to_crop))
rpn_box_predictor_features.append(single_rpn_box_predictor_features)
anchors = box_list_ops.concatenate(
self._first_stage_anchor_generator.generate(feature_map_shapes))
return (rpn_box_predictor_features, rpn_features_to_crop,
anchors, image_shape)
def _extract_proposal_features(self, preprocessed_inputs):
if self._feature_extractor_for_proposal_features == (
_UNINITIALIZED_FEATURE_EXTRACTOR):
self._feature_extractor_for_proposal_features = (
self._feature_extractor.get_proposal_feature_extractor_model(
name=self.first_stage_feature_extractor_scope))
if self._feature_extractor_for_proposal_features:
proposal_features = (
self._feature_extractor_for_proposal_features(preprocessed_inputs),
{})
else:
proposal_features = (
self._feature_extractor.extract_proposal_features(
preprocessed_inputs,
scope=self.first_stage_feature_extractor_scope))
return proposal_features
def _predict_rpn_proposals(self, rpn_box_predictor_features):
"""Adds box predictors to RPN feature map to predict proposals.
Note resulting tensors will not have been postprocessed.
Args:
rpn_box_predictor_features: A list of 4-D float32 tensor with shape
[batch, height_i, width_j, depth] to be used for predicting proposal
boxes and corresponding objectness scores.
Returns:
box_encodings: 3-D float tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted boxes.
objectness_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
Raises:
RuntimeError: if the anchor generator generates anchors corresponding to
multiple feature maps. We currently assume that a single feature map
is generated for the RPN.
"""
num_anchors_per_location = (
self._first_stage_anchor_generator.num_anchors_per_location())
if self._first_stage_box_predictor.is_keras_model:
box_predictions = self._first_stage_box_predictor(
rpn_box_predictor_features)
else:
box_predictions = self._first_stage_box_predictor.predict(
rpn_box_predictor_features,
num_anchors_per_location,
scope=self.first_stage_box_predictor_scope)
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
objectness_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (tf.squeeze(box_encodings, axis=2),
objectness_predictions_with_background)
def _remove_invalid_anchors_and_predictions(
self,
box_encodings,
objectness_predictions_with_background,
anchors_boxlist,
clip_window):
"""Removes anchors that (partially) fall outside an image.
Also removes associated box encodings and objectness predictions.
Args:
box_encodings: 3-D float tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted boxes.
objectness_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN)
in absolute coordinates.
clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax]
extent of the window to clip/prune to.
Returns:
box_encodings: 3-D float tensor of shape
[batch_size, num_valid_anchors, self._box_coder.code_size] containing
predicted boxes, where num_valid_anchors <= num_anchors
objectness_predictions_with_background: 3-D float tensor of shape
[batch_size, num_valid_anchors, 2] containing class
predictions (logits) for each of the anchors, where
num_valid_anchors <= num_anchors. Note that this
tensor *includes* background class predictions (at class index 0).
anchors: A BoxList representing num_valid_anchors anchors (for the RPN) in
absolute coordinates.
"""
pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window(
anchors_boxlist, clip_window)
def _batch_gather_kept_indices(predictions_tensor):
return shape_utils.static_or_dynamic_map_fn(
functools.partial(tf.gather, indices=keep_indices),
elems=predictions_tensor,
dtype=tf.float32,
parallel_iterations=self._parallel_iterations,
back_prop=True)
return (_batch_gather_kept_indices(box_encodings),
_batch_gather_kept_indices(objectness_predictions_with_background),
pruned_anchors_boxlist)
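  # Illustrative sketch: with a [0, 0, 100, 100] clip window and anchors
  # [[-10, -10, 20, 20], [10, 10, 60, 60]], prune_outside_window drops the
  # first anchor (it extends past the window) and returns keep_indices=[1];
  # the same indices are then gathered from every batch element of the box
  # encodings and objectness logits so the three outputs stay aligned.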
def _flatten_first_two_dimensions(self, inputs):
"""Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor.
Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape
[A * B, ..., depth].
Args:
inputs: A float tensor with shape [A, B, ..., depth]. Note that the first
two and last dimensions must be statically defined.
Returns:
A float tensor with shape [A * B, ..., depth] (where the first and last
dimensions are statically defined).
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs)
flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +
combined_shape[2:])
return tf.reshape(inputs, flattened_shape)
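  # Worked example (sketch): cropped proposal features of shape
  # [batch_size=2, max_num_proposals=8, 14, 14, 576] become
  # [16, 14, 14, 576], so proposals from all images share a single batch
  # dimension of size batch_size * max_num_proposals:
  #
  #   x = tf.zeros([2, 8, 14, 14, 576])
  #   flat = tf.reshape(x, [2 * 8, 14, 14, 576])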
def postprocess(self, prediction_dict, true_image_shapes):
"""Convert prediction tensors to final detections.
This function converts raw predictions tensors to final detection results.
See base class for output format conventions. Note also that by default,
scores are to be interpreted as logits, but if a score_converter is used,
then scores are remapped (and may thus have a different interpretation).
If number_of_stages=1, the returned results represent proposals from the
first stage RPN and are padded to have self.max_num_proposals for each
image; otherwise, the results can be interpreted as multiclass detections
from the full two-stage model and are padded to self._max_detections.
Args:
prediction_dict: a dictionary holding prediction tensors (see the
documentation for the predict method). If number_of_stages=1, we
expect prediction_dict to contain `rpn_box_encodings`,
`rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,
and `anchors` fields. Otherwise we expect prediction_dict to
additionally contain `refined_box_encodings`,
`class_predictions_with_background`, `num_proposals`,
`proposal_boxes` and, optionally, `mask_predictions` fields.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
detections: a dictionary containing the following fields
detection_boxes: [batch, max_detections, 4]
detection_scores: [batch, max_detections]
detection_multiclass_scores: [batch, max_detections, 2]
detection_anchor_indices: [batch, max_detections]
detection_classes: [batch, max_detections]
(this entry is only created if rpn_mode=False)
num_detections: [batch]
raw_detection_boxes: [batch, total_detections, 4]
raw_detection_scores: [batch, total_detections, num_classes + 1]
Raises:
ValueError: If `predict` is called before `preprocess`.
ValueError: If `_output_final_box_features` is true but
rpn_features_to_crop is not in the prediction_dict.
"""
with tf.name_scope('FirstStagePostprocessor'):
if self._number_of_stages == 1:
image_shapes = self._image_batch_shape_2d(
prediction_dict['image_shape'])
(proposal_boxes, proposal_scores, proposal_multiclass_scores,
num_proposals, raw_proposal_boxes,
raw_proposal_scores) = self._postprocess_rpn(
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'],
prediction_dict['anchors'], image_shapes, true_image_shapes)
return {
fields.DetectionResultFields.detection_boxes:
proposal_boxes,
fields.DetectionResultFields.detection_scores:
proposal_scores,
fields.DetectionResultFields.detection_multiclass_scores:
proposal_multiclass_scores,
fields.DetectionResultFields.num_detections:
tf.cast(num_proposals, dtype=tf.float32),
fields.DetectionResultFields.raw_detection_boxes:
raw_proposal_boxes,
fields.DetectionResultFields.raw_detection_scores:
raw_proposal_scores
}
# TODO(jrru): Remove mask_predictions from _post_process_box_classifier.
if (self._number_of_stages == 2 or
(self._number_of_stages == 3 and self._is_training)):
with tf.name_scope('SecondStagePostprocessor'):
mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS)
detections_dict = self._postprocess_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'],
true_image_shapes,
mask_predictions=mask_predictions)
if self._output_final_box_features:
if 'rpn_features_to_crop' not in prediction_dict:
raise ValueError(
'Please make sure rpn_features_to_crop is in the prediction_dict.'
)
detections_dict[
'detection_features'] = (
self._add_detection_box_boxclassifier_features_output_node(
detections_dict[
fields.DetectionResultFields.detection_boxes],
prediction_dict['rpn_features_to_crop'],
prediction_dict['image_shape']))
if self._output_final_box_rpn_features:
if 'rpn_features_to_crop' not in prediction_dict:
raise ValueError(
'Please make sure rpn_features_to_crop is in the prediction_dict.'
)
detections_dict['cropped_rpn_box_features'] = (
self._add_detection_box_rpn_features_output_node(
detections_dict[fields.DetectionResultFields.detection_boxes],
prediction_dict['rpn_features_to_crop'],
prediction_dict['image_shape']))
return detections_dict
if self._number_of_stages == 3:
# Post processing is already performed in 3rd stage. We need to transfer
# postprocessed tensors from `prediction_dict` to `detections_dict`.
# Remove any items from the prediction dictionary if they are not pure
# Tensors.
non_tensor_predictions = [
k for k, v in prediction_dict.items() if not isinstance(v, tf.Tensor)]
for k in non_tensor_predictions:
tf.logging.info('Removing {0} from prediction_dict'.format(k))
prediction_dict.pop(k)
return prediction_dict
def _add_detection_box_boxclassifier_features_output_node(
self, detection_boxes, rpn_features_to_crop, image_shape):
"""Add detection features to outputs.
This function extracts box features for each box in rpn_features_to_crop.
It returns the extracted box features, reshaped to
[batch size, max_detections, height, width, depth]. It also average-pools
the extracted features across the spatial dimensions and adds a graph node
named 'pooled_detection_features' for the pooled features.
Args:
detection_boxes: a 3-D float32 tensor of shape
[batch_size, max_detections, 4] which represents the bounding boxes.
rpn_features_to_crop: A list of 4-D float32 tensor with shape
[batch, height, width, depth] representing image features to crop using
the proposal boxes.
image_shape: a 1-D tensor of shape [4] representing the image shape.
Returns:
detection_features: a 4-D float32 tensor of shape
[batch size, max_detections, height, width, depth] representing
cropped image features
"""
with tf.name_scope('SecondStageDetectionFeaturesExtract'):
flattened_detected_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, detection_boxes, image_shape))
detection_features_unpooled = self._extract_box_classifier_features(
flattened_detected_feature_maps)
batch_size = tf.shape(detection_boxes)[0]
max_detections = tf.shape(detection_boxes)[1]
detection_features_pool = tf.reduce_mean(
detection_features_unpooled, axis=[1, 2])
reshaped_detection_features_pool = tf.reshape(
detection_features_pool,
[batch_size, max_detections, tf.shape(detection_features_pool)[-1]])
reshaped_detection_features_pool = tf.identity(
reshaped_detection_features_pool, 'pooled_detection_features')
# TODO(sbeery) add node to extract rpn features here!!
reshaped_detection_features = tf.reshape(
detection_features_unpooled,
[batch_size, max_detections,
tf.shape(detection_features_unpooled)[1],
tf.shape(detection_features_unpooled)[2],
tf.shape(detection_features_unpooled)[3]])
return reshaped_detection_features
def _add_detection_box_rpn_features_output_node(self, detection_boxes,
rpn_features_to_crop,
image_shape):
"""Add detection features to outputs.
This function extracts box features for each box in rpn_features_to_crop.
It returns the extracted box features, reshaped to
[batch size, max_detections, height, width, depth]
Args:
detection_boxes: a 3-D float32 tensor of shape
[batch_size, max_detections, 4] which represents the bounding boxes.
rpn_features_to_crop: A list of 4-D float32 tensor with shape
[batch, height, width, depth] representing image features to crop using
the proposal boxes.
image_shape: a 1-D tensor of shape [4] representing the image shape.
Returns:
detection_features: a 4-D float32 tensor of shape
[batch size, max_detections, height, width, depth] representing
cropped image features
"""
with tf.name_scope('FirstStageDetectionFeaturesExtract'):
flattened_detected_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, detection_boxes, image_shape))
batch_size = tf.shape(detection_boxes)[0]
max_detections = tf.shape(detection_boxes)[1]
reshaped_detection_features = tf.reshape(
flattened_detected_feature_maps,
[batch_size, max_detections,
tf.shape(flattened_detected_feature_maps)[1],
tf.shape(flattened_detected_feature_maps)[2],
tf.shape(flattened_detected_feature_maps)[3]])
return reshaped_detection_features
def _postprocess_rpn(self,
rpn_box_encodings_batch,
rpn_objectness_predictions_with_background_batch,
anchors,
image_shapes,
true_image_shapes):
"""Converts first stage prediction tensors from the RPN to proposals.
This function decodes the raw RPN predictions and runs non-max suppression
on the result.
Note that the behavior of this function is slightly modified during
training --- specifically, we stop the gradient from passing through the
proposal boxes and we only return a balanced sampled subset of proposals
with size `second_stage_batch_size`.
Args:
rpn_box_encodings_batch: A 3-D float32 tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted proposal box encodings.
rpn_objectness_predictions_with_background_batch: A 3-D float tensor of
shape [batch_size, num_anchors, 2] containing objectness predictions
(logits) for each of the anchors with 0 corresponding to background
and 1 corresponding to object.
anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
for the first stage RPN. Note that `num_anchors` can differ depending
on whether the model is created in training or inference mode.
image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of
images in the batch.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
proposal_boxes: A float tensor with shape
[batch_size, max_num_proposals, 4] representing the (potentially zero
padded) proposal boxes for all images in the batch. These boxes are
represented as normalized coordinates.
proposal_scores: A float tensor with shape
[batch_size, max_num_proposals] representing the (potentially zero
padded) proposal objectness scores for all images in the batch.
proposal_multiclass_scores: A float tensor with shape
[batch_size, max_num_proposals, 2] representing the (potentially zero
padded) proposal multiclass scores for all images in the batch.
num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
representing the number of proposals predicted for each image in
the batch.
raw_detection_boxes: [batch, total_detections, 4] tensor with decoded
proposal boxes before Non-Max Suppression.
raw_detection_scores: [batch, total_detections,
num_classes_with_background] tensor of multi-class scores for raw
proposal boxes.
"""
rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2)
rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape(
rpn_box_encodings_batch)
tiled_anchor_boxes = tf.tile(
tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1])
proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch,
tiled_anchor_boxes)
raw_proposal_boxes = tf.squeeze(proposal_boxes, axis=2)
rpn_objectness_softmax = tf.nn.softmax(
rpn_objectness_predictions_with_background_batch)
rpn_objectness_softmax_without_background = rpn_objectness_softmax[:, :, 1]
clip_window = self._compute_clip_window(true_image_shapes)
additional_fields = {'multiclass_scores': rpn_objectness_softmax}
(proposal_boxes, proposal_scores, _, _, nmsed_additional_fields,
num_proposals) = self._first_stage_nms_fn(
tf.expand_dims(raw_proposal_boxes, axis=2),
tf.expand_dims(rpn_objectness_softmax_without_background, axis=2),
additional_fields=additional_fields,
clip_window=clip_window)
if self._is_training:
proposal_boxes = tf.stop_gradient(proposal_boxes)
if not self._hard_example_miner:
(groundtruth_boxlists, groundtruth_classes_with_background_list, _,
groundtruth_weights_list
) = self._format_groundtruth_data(image_shapes)
(proposal_boxes, proposal_scores,
num_proposals) = self._sample_box_classifier_batch(
proposal_boxes, proposal_scores, num_proposals,
groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_weights_list)
# normalize proposal boxes
def normalize_boxes(args):
proposal_boxes_per_image = args[0]
image_shape = args[1]
normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
box_list.BoxList(proposal_boxes_per_image), image_shape[0],
image_shape[1], check_range=False).get()
return normalized_boxes_per_image
normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn(
normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32)
raw_normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn(
normalize_boxes,
elems=[raw_proposal_boxes, image_shapes],
dtype=tf.float32)
proposal_multiclass_scores = (
nmsed_additional_fields.get('multiclass_scores')
if nmsed_additional_fields else None)
return (normalized_proposal_boxes, proposal_scores,
proposal_multiclass_scores, num_proposals,
raw_normalized_proposal_boxes, rpn_objectness_softmax)
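  # Shape sketch (illustrative numbers): with batch_size=2 and, say, 17000
  # valid anchors, the decoded raw proposals keep shape [2, 17000, 4], while
  # the NMS above pads/clips its outputs to max_num_proposals per image,
  # e.g. proposal_boxes [2, 300, 4], proposal_scores [2, 300] and
  # num_proposals [2] when max_num_proposals is 300.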
def _sample_box_classifier_batch(
self,
proposal_boxes,
proposal_scores,
num_proposals,
groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list):
"""Samples a minibatch for second stage.
Args:
proposal_boxes: A float tensor with shape
[batch_size, num_proposals, 4] representing the (potentially zero
padded) proposal boxes for all images in the batch. These boxes are
represented in absolute coordinates.
proposal_scores: A float tensor with shape
[batch_size, num_proposals] representing the (potentially zero
padded) proposal objectness scores for all images in the batch.
num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
representing the number of proposals predicted for each image in
the batch.
groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates
of the groundtruth boxes.
groundtruth_classes_with_background_list: A list of 2-D one-hot
(or k-hot) tensors of shape [num_boxes, num_classes+1] containing the
class targets with the 0th index assumed to map to the background class.
groundtruth_weights_list: A list of 1-D tensors of shape [num_boxes]
indicating the weight associated with the groundtruth boxes.
Returns:
proposal_boxes: A float tensor with shape
[batch_size, second_stage_batch_size, 4] representing the (potentially
zero padded) proposal boxes for all images in the batch. These boxes
are represented in absolute coordinates.
proposal_scores: A float tensor with shape
[batch_size, second_stage_batch_size] representing the (potentially zero
padded) proposal objectness scores for all images in the batch.
num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
representing the number of proposals predicted for each image in
the batch.
"""
single_image_proposal_box_sample = []
single_image_proposal_score_sample = []
single_image_num_proposals_sample = []
for (single_image_proposal_boxes,
single_image_proposal_scores,
single_image_num_proposals,
single_image_groundtruth_boxlist,
single_image_groundtruth_classes_with_background,
single_image_groundtruth_weights) in zip(
tf.unstack(proposal_boxes),
tf.unstack(proposal_scores),
tf.unstack(num_proposals),
groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list):
single_image_boxlist = box_list.BoxList(single_image_proposal_boxes)
single_image_boxlist.add_field(fields.BoxListFields.scores,
single_image_proposal_scores)
sampled_boxlist = self._sample_box_classifier_minibatch_single_image(
single_image_boxlist,
single_image_num_proposals,
single_image_groundtruth_boxlist,
single_image_groundtruth_classes_with_background,
single_image_groundtruth_weights)
sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list(
sampled_boxlist,
num_boxes=self._second_stage_batch_size)
single_image_num_proposals_sample.append(tf.minimum(
sampled_boxlist.num_boxes(),
self._second_stage_batch_size))
bb = sampled_padded_boxlist.get()
single_image_proposal_box_sample.append(bb)
single_image_proposal_score_sample.append(
sampled_padded_boxlist.get_field(fields.BoxListFields.scores))
return (tf.stack(single_image_proposal_box_sample),
tf.stack(single_image_proposal_score_sample),
tf.stack(single_image_num_proposals_sample))
def _format_groundtruth_data(self, image_shapes):
"""Helper function for preparing groundtruth data for target assignment.
In order to be consistent with the model.DetectionModel interface,
groundtruth boxes are specified in normalized coordinates and classes are
specified as label indices with no assumed background category. To prepare
for target assignment, we:
1) convert boxes to absolute coordinates,
2) add a background class at class index 0
3) groundtruth instance masks, if available, are resized to match
image_shape.
Args:
image_shapes: a 2-D int32 tensor of shape [batch_size, 3] containing
shapes of the input images in the batch.
Returns:
groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates
of the groundtruth boxes.
groundtruth_classes_with_background_list: A list of 2-D one-hot
(or k-hot) tensors of shape [num_boxes, num_classes+1] containing the
class targets with the 0th index assumed to map to the background class.
groundtruth_masks_list: If present, a list of 3-D tf.float32 tensors of
shape [num_boxes, image_height, image_width] containing instance masks.
This is set to None if no masks exist in the provided groundtruth.
"""
# pylint: disable=g-complex-comprehension
groundtruth_boxlists = [
box_list_ops.to_absolute_coordinates(
box_list.BoxList(boxes), image_shapes[i, 0], image_shapes[i, 1])
for i, boxes in enumerate(
self.groundtruth_lists(fields.BoxListFields.boxes))
]
groundtruth_classes_with_background_list = []
for one_hot_encoding in self.groundtruth_lists(
fields.BoxListFields.classes):
groundtruth_classes_with_background_list.append(
tf.cast(
tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'),
dtype=tf.float32))
groundtruth_masks_list = self._groundtruth_lists.get(
fields.BoxListFields.masks)
# TODO(rathodv): Remove mask resizing once the legacy pipeline is deleted.
if groundtruth_masks_list is not None and self._resize_masks:
resized_masks_list = []
for mask in groundtruth_masks_list:
_, resized_mask, _ = self._image_resizer_fn(
# Reuse the given `image_resizer_fn` to resize groundtruth masks.
# `mask` tensor for an image is of the shape [num_masks,
# image_height, image_width]. Below we create a dummy image of
# the shape [image_height, image_width, 1] to use with
# `image_resizer_fn`.
image=tf.zeros(tf.stack([tf.shape(mask)[1],
tf.shape(mask)[2], 1])),
masks=mask)
resized_masks_list.append(resized_mask)
groundtruth_masks_list = resized_masks_list
# Masks could be set to bfloat16 in the input pipeline for performance
# reasons. Convert masks back to floating point space here since the rest of
# this module assumes groundtruth to be of float32 type.
float_groundtruth_masks_list = []
if groundtruth_masks_list:
for mask in groundtruth_masks_list:
float_groundtruth_masks_list.append(tf.cast(mask, tf.float32))
groundtruth_masks_list = float_groundtruth_masks_list
if self.groundtruth_has_field(fields.BoxListFields.weights):
groundtruth_weights_list = self.groundtruth_lists(
fields.BoxListFields.weights)
else:
# Set weights for all batch elements equally to 1.0
groundtruth_weights_list = []
for groundtruth_classes in groundtruth_classes_with_background_list:
num_gt = tf.shape(groundtruth_classes)[0]
groundtruth_weights = tf.ones(num_gt)
groundtruth_weights_list.append(groundtruth_weights)
return (groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_masks_list, groundtruth_weights_list)
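  # Sketch of the background padding performed above (illustrative): a
  # one-hot class tensor [[0, 1], [1, 0]] for num_classes=2 becomes
  # [[0, 0, 1], [0, 1, 0]], i.e. a zero background column is prepended at
  # class index 0:
  #
  #   one_hot = tf.constant([[0., 1.], [1., 0.]])
  #   with_background = tf.pad(one_hot, [[0, 0], [1, 0]], mode='CONSTANT')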
def _sample_box_classifier_minibatch_single_image(
self, proposal_boxlist, num_valid_proposals, groundtruth_boxlist,
groundtruth_classes_with_background, groundtruth_weights):
"""Samples a mini-batch of proposals to be sent to the box classifier.
Helper function for self._postprocess_rpn.
Args:
proposal_boxlist: A BoxList containing K proposal boxes in absolute
coordinates.
num_valid_proposals: Number of valid proposals in the proposal boxlist.
groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in
absolute coordinates.
groundtruth_classes_with_background: A tensor with shape
`[N, self.num_classes + 1]` representing groundtruth classes. The
classes are assumed to be k-hot encoded, and include background as the
zero-th class.
groundtruth_weights: Weights attached to the groundtruth_boxes.
Returns:
a BoxList containing the sampled proposals.
"""
(cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(
proposal_boxlist,
groundtruth_boxlist,
groundtruth_classes_with_background,
unmatched_class_label=tf.constant(
[1] + self._num_classes * [0], dtype=tf.float32),
groundtruth_weights=groundtruth_weights)
# Selects all boxes as candidates if none of them is selected according
# to cls_weights. This could happen as boxes within certain IOU ranges
# are ignored. If triggered, the selected boxes will still be ignored
# during loss computation.
cls_weights = tf.reduce_mean(cls_weights, axis=-1)
positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)
valid_indicator = tf.logical_and(
tf.range(proposal_boxlist.num_boxes()) < num_valid_proposals,
cls_weights > 0
)
selected_positions = self._second_stage_sampler.subsample(
valid_indicator,
self._second_stage_batch_size,
positive_indicator)
return box_list_ops.boolean_mask(
proposal_boxlist,
selected_positions,
use_static_shapes=self._use_static_shapes,
indicator_sum=(self._second_stage_batch_size
if self._use_static_shapes else None))
def _compute_second_stage_input_feature_maps(self, features_to_crop,
proposal_boxes_normalized,
image_shape,
**side_inputs):
"""Crops to a set of proposals from the feature map for a batch of images.
Helper function for self._postprocess_rpn. This function calls
`tf.image.crop_and_resize` to create the feature map to be passed to the
second stage box classifier for each proposal.
Args:
features_to_crop: A list of 4-D float32 tensors with shape
[batch_size, height_i, width_i, depth] representing the feature map
levels to crop proposals from.
proposal_boxes_normalized: A float32 tensor with shape [batch_size,
num_proposals, box_code_size] containing proposal boxes in
normalized coordinates.
image_shape: A 1-D int32 tensor of size [4] containing the image shape.
**side_inputs: additional tensors that are required by the network.
Returns:
A float32 tensor with shape [K, new_height, new_width, depth].
"""
num_levels = len(features_to_crop)
box_levels = None
if num_levels != 1:
# If there are multiple levels to select, get the box levels
# unit_scale_index: num_levels-2 is chosen based on section 4.2 of
# https://arxiv.org/pdf/1612.03144.pdf and works best for ResNet-based
# feature extractors.
box_levels = ops.fpn_feature_levels(
num_levels, num_levels - 2,
tf.sqrt(tf.cast(image_shape[1] * image_shape[2], tf.float32)) / 224.0,
proposal_boxes_normalized)
cropped_regions = self._flatten_first_two_dimensions(
self._crop_and_resize_fn(
features_to_crop, proposal_boxes_normalized, box_levels,
[self._initial_crop_size, self._initial_crop_size]))
return self._maxpool_layer(cropped_regions)
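  # Rough sketch of the multi-level assignment referenced above (FPN paper,
  # section 4.2); the exact behaviour lives in ops.fpn_feature_levels, so
  # treat this as an approximation only: a proposal whose absolute size
  # sqrt(w * h) is about 224 pixels is routed to the unit-scale level
  # (num_levels - 2 here), larger proposals to coarser levels and smaller
  # ones to finer levels, roughly
  #
  #   level ~= unit_scale_index + floor(log2(sqrt(w * h) / 224))
  #
  # clipped to [0, num_levels - 1].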
def _postprocess_box_classifier(self,
refined_box_encodings,
class_predictions_with_background,
proposal_boxes,
num_proposals,
image_shapes,
mask_predictions=None):
"""Converts predictions from the second stage box classifier to detections.
Args:
refined_box_encodings: a 3-D float tensor with shape
[total_num_padded_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings. If using a shared
box across classes the shape will instead be
[total_num_padded_proposals, 1, 4]
class_predictions_with_background: a 2-D float tensor with shape
[total_num_padded_proposals, num_classes + 1] containing class
predictions (logits) for each of the proposals. Note that this tensor
*includes* background class predictions (at class index 0).
proposal_boxes: a 3-D float tensor with shape
[batch_size, self.max_num_proposals, 4] representing decoded proposal
bounding boxes in absolute coordinates.
num_proposals: a 1-D int32 tensor of shape [batch] representing the number
of proposals predicted for each image in the batch.
image_shapes: a 2-D int32 tensor containing shapes of input images in the
batch.
mask_predictions: (optional) a 4-D float tensor with shape
[total_num_padded_proposals, num_classes, mask_height, mask_width]
containing instance mask prediction logits.
Returns:
A dictionary containing:
`detection_boxes`: [batch, max_detections, 4] in normalized coordinates.
`detection_scores`: [batch, max_detections]
`detection_multiclass_scores`: [batch, max_detections,
num_classes_with_background] tensor with class score distribution for
post-processed detection boxes including background class if any.
`detection_anchor_indices`: [batch, max_detections] with anchor
indices.
`detection_classes`: [batch, max_detections]
`num_detections`: [batch]
`detection_masks`:
(optional) [batch, max_detections, mask_height, mask_width]. Note
that a pixel-wise sigmoid score converter is applied to the detection
masks.
`raw_detection_boxes`: [batch, total_detections, 4] tensor with decoded
detection boxes in normalized coordinates, before Non-Max Suppression.
The value total_detections is the number of second stage anchors
(i.e. the total number of boxes before NMS).
`raw_detection_scores`: [batch, total_detections,
num_classes_with_background] tensor of multi-class scores for
raw detection boxes. The value total_detections is the number of
second stage anchors (i.e. the total number of boxes before NMS).
"""
refined_box_encodings_batch = tf.reshape(
refined_box_encodings,
[-1,
self.max_num_proposals,
refined_box_encodings.shape[1],
self._box_coder.code_size])
class_predictions_with_background_batch = tf.reshape(
class_predictions_with_background,
[-1, self.max_num_proposals, self.num_classes + 1]
)
refined_decoded_boxes_batch = self._batch_decode_boxes(
refined_box_encodings_batch, proposal_boxes)
class_predictions_with_background_batch_normalized = (
self._second_stage_score_conversion_fn(
class_predictions_with_background_batch))
class_predictions_batch = tf.reshape(
tf.slice(class_predictions_with_background_batch_normalized,
[0, 0, 1], [-1, -1, -1]),
[-1, self.max_num_proposals, self.num_classes])
clip_window = self._compute_clip_window(image_shapes)
mask_predictions_batch = None
if mask_predictions is not None:
mask_height = shape_utils.get_dim_as_int(mask_predictions.shape[2])
mask_width = shape_utils.get_dim_as_int(mask_predictions.shape[3])
mask_predictions = tf.sigmoid(mask_predictions)
mask_predictions_batch = tf.reshape(
mask_predictions, [-1, self.max_num_proposals,
self.num_classes, mask_height, mask_width])
batch_size = shape_utils.combined_static_and_dynamic_shape(
refined_box_encodings_batch)[0]
batch_anchor_indices = tf.tile(
tf.expand_dims(tf.range(self.max_num_proposals), 0),
multiples=[batch_size, 1])
additional_fields = {
'multiclass_scores': class_predictions_with_background_batch_normalized,
'anchor_indices': tf.cast(batch_anchor_indices, tf.float32)
}
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields, num_detections) = self._second_stage_nms_fn(
refined_decoded_boxes_batch,
class_predictions_batch,
clip_window=clip_window,
change_coordinate_frame=True,
num_valid_boxes=num_proposals,
additional_fields=additional_fields,
masks=mask_predictions_batch)
if refined_decoded_boxes_batch.shape[2] > 1:
class_ids = tf.expand_dims(
tf.argmax(class_predictions_with_background_batch[:, :, 1:], axis=2,
output_type=tf.int32),
axis=-1)
raw_detection_boxes = tf.squeeze(
tf.batch_gather(refined_decoded_boxes_batch, class_ids), axis=2)
else:
raw_detection_boxes = tf.squeeze(refined_decoded_boxes_batch, axis=2)
raw_normalized_detection_boxes = shape_utils.static_or_dynamic_map_fn(
self._normalize_and_clip_boxes,
elems=[raw_detection_boxes, image_shapes],
dtype=tf.float32)
detections = {
fields.DetectionResultFields.detection_boxes:
nmsed_boxes,
fields.DetectionResultFields.detection_scores:
nmsed_scores,
fields.DetectionResultFields.detection_classes:
nmsed_classes,
fields.DetectionResultFields.detection_multiclass_scores:
nmsed_additional_fields['multiclass_scores'],
fields.DetectionResultFields.detection_anchor_indices:
tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32),
fields.DetectionResultFields.num_detections:
tf.cast(num_detections, dtype=tf.float32),
fields.DetectionResultFields.raw_detection_boxes:
raw_normalized_detection_boxes,
fields.DetectionResultFields.raw_detection_scores:
class_predictions_with_background_batch_normalized
}
if nmsed_masks is not None:
detections[fields.DetectionResultFields.detection_masks] = nmsed_masks
return detections
def _batch_decode_boxes(self, box_encodings, anchor_boxes):
"""Decodes box encodings with respect to the anchor boxes.
    Args:
      box_encodings: a 4-D tensor with shape
        [batch_size, num_anchors, num_classes, self._box_coder.code_size]
        representing box encodings. If using a shared box across classes,
        the shape will instead be
        [batch_size, num_anchors, 1, self._box_coder.code_size].
      anchor_boxes: a 3-D tensor with shape
        [batch_size, num_anchors, self._box_coder.code_size] representing
        decoded bounding boxes that serve as anchors for the box encodings.
Returns:
decoded_boxes: a
[batch_size, num_anchors, num_classes, self._box_coder.code_size]
float tensor representing bounding box predictions (for each image in
batch, proposal and class). If using a shared box across classes the
shape will instead be
[batch_size, num_anchors, 1, self._box_coder.code_size].
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(
box_encodings)
num_classes = combined_shape[2]
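    # Tile each anchor across the class dimension so that every class-specific
    # box encoding is decoded against the same anchor box.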
tiled_anchor_boxes = tf.tile(
tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1])
tiled_anchors_boxlist = box_list.BoxList(
tf.reshape(tiled_anchor_boxes, [-1, 4]))
decoded_boxes = self._box_coder.decode(
tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
tiled_anchors_boxlist)
return tf.reshape(decoded_boxes.get(),
tf.stack([combined_shape[0], combined_shape[1],
num_classes, 4]))
def _normalize_and_clip_boxes(self, boxes_and_image_shape):
"""Normalize and clip boxes."""
boxes_per_image = boxes_and_image_shape[0]
image_shape = boxes_and_image_shape[1]
boxes_contains_classes_dim = boxes_per_image.shape.ndims == 3
if boxes_contains_classes_dim:
boxes_per_image = shape_utils.flatten_first_n_dimensions(
boxes_per_image, 2)
normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
box_list.BoxList(boxes_per_image),
image_shape[0],
image_shape[1],
check_range=False).get()
normalized_boxes_per_image = box_list_ops.clip_to_window(
box_list.BoxList(normalized_boxes_per_image),
tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32),
filter_nonoverlapping=False).get()
if boxes_contains_classes_dim:
max_num_proposals, num_classes, _ = (
shape_utils.combined_static_and_dynamic_shape(
boxes_and_image_shape[0]))
normalized_boxes_per_image = shape_utils.expand_first_dimension(
normalized_boxes_per_image, [max_num_proposals, num_classes])
return normalized_boxes_per_image
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Compute scalar loss tensors given prediction tensors.
If number_of_stages=1, only RPN related losses are computed (i.e.,
`rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all
losses are computed.
Args:
      prediction_dict: a dictionary holding prediction tensors (see the
        documentation for the predict method). If number_of_stages=1, we
expect prediction_dict to contain `rpn_box_encodings`,
`rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,
`image_shape`, and `anchors` fields. Otherwise we expect
prediction_dict to additionally contain `refined_box_encodings`,
`class_predictions_with_background`, `num_proposals`, and
`proposal_boxes` fields.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
scope: Optional scope name.
Returns:
a dictionary mapping loss keys (`first_stage_localization_loss`,
      `first_stage_objectness_loss`, `second_stage_localization_loss`,
      `second_stage_classification_loss`) to scalar tensors representing
corresponding loss values.
"""
with tf.name_scope(scope, 'Loss', prediction_dict.values()):
(groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_masks_list, groundtruth_weights_list
) = self._format_groundtruth_data(
self._image_batch_shape_2d(prediction_dict['image_shape']))
loss_dict = self._loss_rpn(
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'],
prediction_dict['anchors'], groundtruth_boxlists,
groundtruth_classes_with_background_list, groundtruth_weights_list)
if self._number_of_stages > 1:
loss_dict.update(
self._loss_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'], groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list, prediction_dict['image_shape'],
prediction_dict.get('mask_predictions'), groundtruth_masks_list,
prediction_dict.get(
fields.DetectionResultFields.detection_boxes),
prediction_dict.get(
fields.DetectionResultFields.num_detections)))
return loss_dict
def _loss_rpn(self, rpn_box_encodings,
rpn_objectness_predictions_with_background, anchors,
groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_weights_list):
"""Computes scalar RPN loss tensors.
Uses self._proposal_target_assigner to obtain regression and classification
targets for the first stage RPN, samples a "minibatch" of anchors to
participate in the loss computation, and returns the RPN losses.
Args:
rpn_box_encodings: A 3-D float tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted proposal box encodings.
rpn_objectness_predictions_with_background: A 2-D float tensor of shape
[batch_size, num_anchors, 2] containing objectness predictions
(logits) for each of the anchors with 0 corresponding to background
and 1 corresponding to object.
anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
for the first stage RPN. Note that `num_anchors` can differ depending
on whether the model is created in training or inference mode.
groundtruth_boxlists: A list of BoxLists containing coordinates of the
groundtruth boxes.
groundtruth_classes_with_background_list: A list of 2-D one-hot
(or k-hot) tensors of shape [num_boxes, num_classes+1] containing the
class targets with the 0th index assumed to map to the background class.
groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
a dictionary mapping loss keys (`first_stage_localization_loss`,
`first_stage_objectness_loss`) to scalar tensors representing
corresponding loss values.
"""
with tf.name_scope('RPNLoss'):
(batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, _) = target_assigner.batch_assign_targets(
target_assigner=self._proposal_target_assigner,
anchors_batch=box_list.BoxList(anchors),
gt_box_batch=groundtruth_boxlists,
gt_class_targets_batch=(len(groundtruth_boxlists) * [None]),
gt_weights_batch=groundtruth_weights_list)
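      # The RPN is class-agnostic: the assigned class targets reduce to a
      # single binary objectness label per anchor.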
batch_cls_weights = tf.reduce_mean(batch_cls_weights, axis=2)
batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2)
def _minibatch_subsample_fn(inputs):
cls_targets, cls_weights = inputs
return self._first_stage_sampler.subsample(
tf.cast(cls_weights, tf.bool),
self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool))
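      # Subsample a fixed-size minibatch of anchors per image so that
      # background anchors do not dominate the RPN loss.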
batch_sampled_indices = tf.cast(shape_utils.static_or_dynamic_map_fn(
_minibatch_subsample_fn,
[batch_cls_targets, batch_cls_weights],
dtype=tf.bool,
parallel_iterations=self._parallel_iterations,
back_prop=True), dtype=tf.float32)
# Normalize by number of examples in sampled minibatch
normalizer = tf.maximum(
tf.reduce_sum(batch_sampled_indices, axis=1), 1.0)
batch_one_hot_targets = tf.one_hot(
tf.cast(batch_cls_targets, dtype=tf.int32), depth=2)
sampled_reg_indices = tf.multiply(batch_sampled_indices,
batch_reg_weights)
losses_mask = None
if self.groundtruth_has_field(fields.InputDataFields.is_annotated):
losses_mask = tf.stack(self.groundtruth_lists(
fields.InputDataFields.is_annotated))
localization_losses = self._first_stage_localization_loss(
rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices,
losses_mask=losses_mask)
objectness_losses = self._first_stage_objectness_loss(
rpn_objectness_predictions_with_background,
batch_one_hot_targets,
weights=tf.expand_dims(batch_sampled_indices, axis=-1),
losses_mask=losses_mask)
localization_loss = tf.reduce_mean(
tf.reduce_sum(localization_losses, axis=1) / normalizer)
objectness_loss = tf.reduce_mean(
tf.reduce_sum(objectness_losses, axis=1) / normalizer)
localization_loss = tf.multiply(self._first_stage_loc_loss_weight,
localization_loss,
name='localization_loss')
objectness_loss = tf.multiply(self._first_stage_obj_loss_weight,
objectness_loss, name='objectness_loss')
loss_dict = {'Loss/RPNLoss/localization_loss': localization_loss,
'Loss/RPNLoss/objectness_loss': objectness_loss}
return loss_dict
def _loss_box_classifier(self,
refined_box_encodings,
class_predictions_with_background,
proposal_boxes,
num_proposals,
groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list,
image_shape,
prediction_masks=None,
groundtruth_masks_list=None,
detection_boxes=None,
num_detections=None):
"""Computes scalar box classifier loss tensors.
Uses self._detector_target_assigner to obtain regression and classification
targets for the second stage box classifier, optionally performs
hard mining, and returns losses. All losses are computed independently
for each image and then averaged across the batch.
Please note that for boxes and masks with multiple labels, the box
regression and mask prediction losses are only computed for one label.
This function assumes that the proposal boxes in the "padded" regions are
actually zero (and thus should not be matched to).
Args:
refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, box_coder.code_size] representing
predicted (final) refined box encodings. If using a shared box across
classes this will instead have shape
[total_num_proposals, 1, box_coder.code_size].
class_predictions_with_background: a 2-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors. Note that this tensor
*includes* background class predictions (at class index 0).
proposal_boxes: [batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes.
num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
representing the number of proposals predicted for each image in
the batch.
groundtruth_boxlists: a list of BoxLists containing coordinates of the
groundtruth boxes.
groundtruth_classes_with_background_list: a list of 2-D one-hot
(or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the
class targets with the 0th index assumed to map to the background class.
groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
image_shape: a 1-D tensor of shape [4] representing the image shape.
prediction_masks: an optional 4-D tensor with shape [total_num_proposals,
num_classes, mask_height, mask_width] containing the instance masks for
each box.
groundtruth_masks_list: an optional list of 3-D tensors of shape
[num_boxes, image_height, image_width] containing the instance masks for
each of the boxes.
detection_boxes: 3-D float tensor of shape [batch,
max_total_detections, 4] containing post-processed detection boxes in
normalized co-ordinates.
num_detections: 1-D int32 tensor of shape [batch] containing number of
valid detections in `detection_boxes`.
Returns:
      a dictionary mapping loss keys (`second_stage_localization_loss`,
      `second_stage_classification_loss`) to scalar tensors representing
corresponding loss values.
Raises:
ValueError: if `predict_instance_masks` in
second_stage_mask_rcnn_box_predictor is True and
`groundtruth_masks_list` is not provided.
"""
with tf.name_scope('BoxClassifierLoss'):
paddings_indicator = self._padded_batched_proposals_indicator(
num_proposals, proposal_boxes.shape[1])
proposal_boxlists = [
box_list.BoxList(proposal_boxes_single_image)
for proposal_boxes_single_image in tf.unstack(proposal_boxes)]
batch_size = len(proposal_boxlists)
num_proposals_or_one = tf.cast(tf.expand_dims(
tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1),
dtype=tf.float32)
normalizer = tf.tile(num_proposals_or_one,
[1, self.max_num_proposals]) * batch_size
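      # Dividing by (number of valid proposals in the image * batch_size)
      # turns the subsequent sums into an average over valid proposals that is
      # then averaged over the batch.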
(batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets,
batch_reg_weights, _) = target_assigner.batch_assign_targets(
target_assigner=self._detector_target_assigner,
anchors_batch=proposal_boxlists,
gt_box_batch=groundtruth_boxlists,
gt_class_targets_batch=groundtruth_classes_with_background_list,
unmatched_class_label=tf.constant(
[1] + self._num_classes * [0], dtype=tf.float32),
gt_weights_batch=groundtruth_weights_list)
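      # If only a subset of classes is labeled in an image, zero out the
      # classification weights of the unlabeled classes. The background column
      # is padded with ones so that background targets remain penalized.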
if self.groundtruth_has_field(
fields.InputDataFields.groundtruth_labeled_classes):
gt_labeled_classes = self.groundtruth_lists(
fields.InputDataFields.groundtruth_labeled_classes)
gt_labeled_classes = tf.pad(
gt_labeled_classes, [[0, 0], [1, 0]],
mode='CONSTANT',
constant_values=1)
batch_cls_weights *= tf.expand_dims(gt_labeled_classes, 1)
class_predictions_with_background = tf.reshape(
class_predictions_with_background,
[batch_size, self.max_num_proposals, -1])
flat_cls_targets_with_background = tf.reshape(
batch_cls_targets_with_background,
[batch_size * self.max_num_proposals, -1])
one_hot_flat_cls_targets_with_background = tf.argmax(
flat_cls_targets_with_background, axis=1)
one_hot_flat_cls_targets_with_background = tf.one_hot(
one_hot_flat_cls_targets_with_background,
flat_cls_targets_with_background.get_shape()[1])
      # If using a shared box across classes, use it directly.
if refined_box_encodings.shape[1] == 1:
reshaped_refined_box_encodings = tf.reshape(
refined_box_encodings,
[batch_size, self.max_num_proposals, self._box_coder.code_size])
# For anchors with multiple labels, picks refined_location_encodings
# for just one class to avoid over-counting for regression loss and
# (optionally) mask loss.
else:
reshaped_refined_box_encodings = (
self._get_refined_encodings_for_postitive_class(
refined_box_encodings,
one_hot_flat_cls_targets_with_background, batch_size))
losses_mask = None
if self.groundtruth_has_field(fields.InputDataFields.is_annotated):
losses_mask = tf.stack(self.groundtruth_lists(
fields.InputDataFields.is_annotated))
second_stage_loc_losses = self._second_stage_localization_loss(
reshaped_refined_box_encodings,
batch_reg_targets,
weights=batch_reg_weights,
losses_mask=losses_mask) / normalizer
second_stage_cls_losses = ops.reduce_sum_trailing_dimensions(
self._second_stage_classification_loss(
class_predictions_with_background,
batch_cls_targets_with_background,
weights=batch_cls_weights,
losses_mask=losses_mask),
ndims=2) / normalizer
second_stage_loc_loss = tf.reduce_sum(
second_stage_loc_losses * tf.cast(paddings_indicator,
dtype=tf.float32))
second_stage_cls_loss = tf.reduce_sum(
second_stage_cls_losses * tf.cast(paddings_indicator,
dtype=tf.float32))
if self._hard_example_miner:
(second_stage_loc_loss, second_stage_cls_loss
) = self._unpad_proposals_and_apply_hard_mining(
proposal_boxlists, second_stage_loc_losses,
second_stage_cls_losses, num_proposals)
localization_loss = tf.multiply(self._second_stage_loc_loss_weight,
second_stage_loc_loss,
name='localization_loss')
classification_loss = tf.multiply(self._second_stage_cls_loss_weight,
second_stage_cls_loss,
name='classification_loss')
loss_dict = {'Loss/BoxClassifierLoss/localization_loss':
localization_loss,
'Loss/BoxClassifierLoss/classification_loss':
classification_loss}
second_stage_mask_loss = None
if prediction_masks is not None:
if groundtruth_masks_list is None:
raise ValueError('Groundtruth instance masks not provided. '
'Please configure input reader.')
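        # At eval time mask features are computed from the post-processed
        # detection boxes rather than the proposals, so the class and mask
        # targets must be recomputed with respect to those detections.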
if not self._is_training:
(proposal_boxes, proposal_boxlists, paddings_indicator,
one_hot_flat_cls_targets_with_background
) = self._get_mask_proposal_boxes_and_classes(
detection_boxes, num_detections, image_shape,
groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_weights_list)
unmatched_mask_label = tf.zeros(image_shape[1:3], dtype=tf.float32)
(batch_mask_targets, _, _, batch_mask_target_weights,
_) = target_assigner.batch_assign_targets(
target_assigner=self._detector_target_assigner,
anchors_batch=proposal_boxlists,
gt_box_batch=groundtruth_boxlists,
gt_class_targets_batch=groundtruth_masks_list,
unmatched_class_label=unmatched_mask_label,
gt_weights_batch=groundtruth_weights_list)
        # Pad prediction_masks with zeros for the background class so that
        # they are consistent with the class predictions.
if prediction_masks.get_shape().as_list()[1] == 1:
# Class agnostic masks or masks for one-class prediction. Logic for
# both cases is the same since background predictions are ignored
# through the batch_mask_target_weights.
prediction_masks_masked_by_class_targets = prediction_masks
else:
prediction_masks_with_background = tf.pad(
prediction_masks, [[0, 0], [1, 0], [0, 0], [0, 0]])
prediction_masks_masked_by_class_targets = tf.boolean_mask(
prediction_masks_with_background,
tf.greater(one_hot_flat_cls_targets_with_background, 0))
mask_height = shape_utils.get_dim_as_int(prediction_masks.shape[2])
mask_width = shape_utils.get_dim_as_int(prediction_masks.shape[3])
reshaped_prediction_masks = tf.reshape(
prediction_masks_masked_by_class_targets,
[batch_size, -1, mask_height * mask_width])
batch_mask_targets_shape = tf.shape(batch_mask_targets)
flat_gt_masks = tf.reshape(batch_mask_targets,
[-1, batch_mask_targets_shape[2],
batch_mask_targets_shape[3]])
# Use normalized proposals to crop mask targets from image masks.
flat_normalized_proposals = box_list_ops.to_normalized_coordinates(
box_list.BoxList(tf.reshape(proposal_boxes, [-1, 4])),
image_shape[1], image_shape[2], check_range=False).get()
flat_cropped_gt_mask = self._crop_and_resize_fn(
[tf.expand_dims(flat_gt_masks, -1)],
tf.expand_dims(flat_normalized_proposals, axis=1), None,
[mask_height, mask_width])
        # Without stopping gradients into the cropped groundtruth masks,
        # performance is about 4% worse when groundtruth masks are padded to
        # 100 instances and the batch size is greater than 1.
# TODO(rathodv): Investigate this since we don't expect any variables
# upstream of flat_cropped_gt_mask.
flat_cropped_gt_mask = tf.stop_gradient(flat_cropped_gt_mask)
batch_cropped_gt_mask = tf.reshape(
flat_cropped_gt_mask,
[batch_size, -1, mask_height * mask_width])
mask_losses_weights = (
batch_mask_target_weights * tf.cast(paddings_indicator,
dtype=tf.float32))
mask_losses = self._second_stage_mask_loss(
reshaped_prediction_masks,
batch_cropped_gt_mask,
weights=tf.expand_dims(mask_losses_weights, axis=-1),
losses_mask=losses_mask)
total_mask_loss = tf.reduce_sum(mask_losses)
normalizer = tf.maximum(
tf.reduce_sum(mask_losses_weights * mask_height * mask_width), 1.0)
second_stage_mask_loss = total_mask_loss / normalizer
if second_stage_mask_loss is not None:
mask_loss = tf.multiply(self._second_stage_mask_loss_weight,
second_stage_mask_loss, name='mask_loss')
loss_dict['Loss/BoxClassifierLoss/mask_loss'] = mask_loss
return loss_dict
def _get_mask_proposal_boxes_and_classes(
self, detection_boxes, num_detections, image_shape, groundtruth_boxlists,
groundtruth_classes_with_background_list, groundtruth_weights_list):
"""Returns proposal boxes and class targets to compute evaluation mask loss.
During evaluation, detection boxes are used to extract features for mask
prediction. Therefore, to compute mask loss during evaluation detection
boxes must be used to compute correct class and mask targets. This function
returns boxes and classes in the correct format for computing mask targets
during evaluation.
Args:
detection_boxes: A 3-D float tensor of shape [batch, max_detection_boxes,
4] containing detection boxes in normalized co-ordinates.
num_detections: A 1-D float tensor of shape [batch] containing number of
valid boxes in `detection_boxes`.
image_shape: A 1-D tensor of shape [4] containing image tensor shape.
groundtruth_boxlists: A list of groundtruth boxlists.
groundtruth_classes_with_background_list: A list of groundtruth classes.
groundtruth_weights_list: A list of groundtruth weights.
    Returns:
mask_proposal_boxes: detection boxes to use for mask proposals in absolute
co-ordinates.
mask_proposal_boxlists: `mask_proposal_boxes` in a list of BoxLists in
absolute co-ordinates.
mask_proposal_paddings_indicator: a tensor indicating valid boxes.
mask_proposal_one_hot_flat_cls_targets_with_background: Class targets
computed using detection boxes.
"""
batch, max_num_detections, _ = detection_boxes.shape.as_list()
proposal_boxes = tf.reshape(box_list_ops.to_absolute_coordinates(
box_list.BoxList(tf.reshape(detection_boxes, [-1, 4])), image_shape[1],
image_shape[2]).get(), [batch, max_num_detections, 4])
proposal_boxlists = [
box_list.BoxList(detection_boxes_single_image)
for detection_boxes_single_image in tf.unstack(proposal_boxes)
]
paddings_indicator = self._padded_batched_proposals_indicator(
tf.cast(num_detections, dtype=tf.int32), detection_boxes.shape[1])
(batch_cls_targets_with_background, _, _, _,
_) = target_assigner.batch_assign_targets(
target_assigner=self._detector_target_assigner,
anchors_batch=proposal_boxlists,
gt_box_batch=groundtruth_boxlists,
gt_class_targets_batch=groundtruth_classes_with_background_list,
unmatched_class_label=tf.constant(
[1] + self._num_classes * [0], dtype=tf.float32),
gt_weights_batch=groundtruth_weights_list)
flat_cls_targets_with_background = tf.reshape(
batch_cls_targets_with_background, [-1, self._num_classes + 1])
one_hot_flat_cls_targets_with_background = tf.argmax(
flat_cls_targets_with_background, axis=1)
one_hot_flat_cls_targets_with_background = tf.one_hot(
one_hot_flat_cls_targets_with_background,
flat_cls_targets_with_background.get_shape()[1])
return (proposal_boxes, proposal_boxlists, paddings_indicator,
one_hot_flat_cls_targets_with_background)
def _get_refined_encodings_for_postitive_class(
self, refined_box_encodings, flat_cls_targets_with_background,
batch_size):
    # We only predict refined location encodings for the non-background
    # classes, but we now pad them to make them compatible with the class
    # predictions.
refined_box_encodings_with_background = tf.pad(refined_box_encodings,
[[0, 0], [1, 0], [0, 0]])
refined_box_encodings_masked_by_class_targets = (
box_list_ops.boolean_mask(
box_list.BoxList(
tf.reshape(refined_box_encodings_with_background,
[-1, self._box_coder.code_size])),
tf.reshape(tf.greater(flat_cls_targets_with_background, 0), [-1]),
use_static_shapes=self._use_static_shapes,
indicator_sum=batch_size * self.max_num_proposals
if self._use_static_shapes else None).get())
return tf.reshape(
refined_box_encodings_masked_by_class_targets, [
batch_size, self.max_num_proposals,
self._box_coder.code_size
])
def _padded_batched_proposals_indicator(self,
num_proposals,
max_num_proposals):
"""Creates indicator matrix of non-pad elements of padded batch proposals.
Args:
num_proposals: Tensor of type tf.int32 with shape [batch_size].
max_num_proposals: Maximum number of proposals per image (integer).
Returns:
A Tensor of type tf.bool with shape [batch_size, max_num_proposals].
"""
batch_size = tf.size(num_proposals)
tiled_num_proposals = tf.tile(
tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
tiled_proposal_index = tf.tile(
tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
return tf.greater(tiled_num_proposals, tiled_proposal_index)
def _unpad_proposals_and_apply_hard_mining(self,
proposal_boxlists,
second_stage_loc_losses,
second_stage_cls_losses,
num_proposals):
"""Unpads proposals and applies hard mining.
Args:
proposal_boxlists: A list of `batch_size` BoxLists each representing
`self.max_num_proposals` representing decoded proposal bounding boxes
for each image.
second_stage_loc_losses: A Tensor of type `float32`. A tensor of shape
`[batch_size, self.max_num_proposals]` representing per-anchor
second stage localization loss values.
second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape
`[batch_size, self.max_num_proposals]` representing per-anchor
second stage classification loss values.
num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
representing the number of proposals predicted for each image in
the batch.
Returns:
second_stage_loc_loss: A scalar float32 tensor representing the second
stage localization loss.
second_stage_cls_loss: A scalar float32 tensor representing the second
stage classification loss.
"""
for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss,
single_image_num_proposals) in zip(
proposal_boxlists,
tf.unstack(second_stage_loc_losses),
tf.unstack(second_stage_cls_losses),
tf.unstack(num_proposals)):
proposal_boxlist = box_list.BoxList(
tf.slice(proposal_boxlist.get(),
[0, 0], [single_image_num_proposals, -1]))
single_image_loc_loss = tf.slice(single_image_loc_loss,
[0], [single_image_num_proposals])
single_image_cls_loss = tf.slice(single_image_cls_loss,
[0], [single_image_num_proposals])
return self._hard_example_miner(
location_losses=tf.expand_dims(single_image_loc_loss, 0),
cls_losses=tf.expand_dims(single_image_cls_loss, 0),
decoded_boxlist_list=[proposal_boxlist])
def regularization_losses(self):
"""Returns a list of regularization losses for this model.
Returns a list of regularization losses for this model that the estimator
needs to use during training/optimization.
Returns:
A list of regularization loss tensors.
"""
all_losses = []
slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# Copy the slim losses to avoid modifying the collection
if slim_losses:
all_losses.extend(slim_losses)
# TODO(kaftan): Possibly raise an error if the feature extractors are
# uninitialized in Keras.
if self._feature_extractor_for_proposal_features:
if (self._feature_extractor_for_proposal_features !=
_UNINITIALIZED_FEATURE_EXTRACTOR):
all_losses.extend(self._feature_extractor_for_proposal_features.losses)
if isinstance(self._first_stage_box_predictor_first_conv,
tf.keras.Model):
all_losses.extend(
self._first_stage_box_predictor_first_conv.losses)
if self._first_stage_box_predictor.is_keras_model:
all_losses.extend(self._first_stage_box_predictor.losses)
if self._feature_extractor_for_box_classifier_features:
if (self._feature_extractor_for_box_classifier_features !=
_UNINITIALIZED_FEATURE_EXTRACTOR):
all_losses.extend(
self._feature_extractor_for_box_classifier_features.losses)
if self._mask_rcnn_box_predictor:
if self._mask_rcnn_box_predictor.is_keras_model:
all_losses.extend(self._mask_rcnn_box_predictor.losses)
return all_losses
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
"""Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
load_all_detection_checkpoint_vars: whether to load all variables (when
`fine_tune_checkpoint_type` is `detection`). If False, only variables
within the feature extractor scopes are included. Default False.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
Raises:
ValueError: if fine_tune_checkpoint_type is neither `classification`
nor `detection`.
"""
if fine_tune_checkpoint_type not in ['detection', 'classification']:
raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
fine_tune_checkpoint_type))
if fine_tune_checkpoint_type == 'classification':
return self._feature_extractor.restore_from_classification_checkpoint_fn(
self.first_stage_feature_extractor_scope,
self.second_stage_feature_extractor_scope)
variables_to_restore = variables_helper.get_global_variables_safely()
variables_to_restore.append(tf.train.get_or_create_global_step())
# Only load feature extractor variables to be consistent with loading from
# a classification checkpoint.
include_patterns = None
if not load_all_detection_checkpoint_vars:
include_patterns = [
self.first_stage_feature_extractor_scope,
self.second_stage_feature_extractor_scope
]
feature_extractor_variables = slim.filter_variables(
variables_to_restore, include_patterns=include_patterns)
return {var.op.name: var for var in feature_extractor_variables}
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of Trackable objects to load from a foreign checkpoint.
Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
    should have the same parameters as this detection model with the exception
    of the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running Tensorflow 2, whereas restore_map (above) is intended
to be used to restore Slim-based models when running Tensorflow 1.x.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
"""
if fine_tune_checkpoint_type == 'classification':
return {
'feature_extractor':
self._feature_extractor.classification_backbone
}
elif fine_tune_checkpoint_type == 'detection':
fake_model = tf.train.Checkpoint(
_feature_extractor_for_box_classifier_features=
self._feature_extractor_for_box_classifier_features,
_feature_extractor_for_proposal_features=
self._feature_extractor_for_proposal_features)
return {'model': fake_model}
elif fine_tune_checkpoint_type == 'full':
return {'model': self}
else:
raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
fine_tune_checkpoint_type))
def updates(self):
"""Returns a list of update operators for this model.
Returns a list of update operators for this model that must be executed at
each training step. The estimator's train op needs to have a control
dependency on these updates.
Returns:
A list of update operators.
"""
update_ops = []
slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# Copy the slim ops to avoid modifying the collection
if slim_update_ops:
update_ops.extend(slim_update_ops)
# Passing None to get_updates_for grabs updates that should always be
# executed and don't depend on any model inputs in the graph.
# (E.g. if there was some count that should be incremented every time a
# model is run).
#
# Passing inputs grabs updates that are transitively computed from the
# model inputs being passed in.
# (E.g. a batchnorm update depends on the observed inputs)
if self._feature_extractor_for_proposal_features:
if (self._feature_extractor_for_proposal_features !=
_UNINITIALIZED_FEATURE_EXTRACTOR):
update_ops.extend(
self._feature_extractor_for_proposal_features.get_updates_for(None))
update_ops.extend(
self._feature_extractor_for_proposal_features.get_updates_for(
self._feature_extractor_for_proposal_features.inputs))
if isinstance(self._first_stage_box_predictor_first_conv,
tf.keras.Model):
update_ops.extend(
self._first_stage_box_predictor_first_conv.get_updates_for(
None))
update_ops.extend(
self._first_stage_box_predictor_first_conv.get_updates_for(
self._first_stage_box_predictor_first_conv.inputs))
if self._first_stage_box_predictor.is_keras_model:
update_ops.extend(
self._first_stage_box_predictor.get_updates_for(None))
update_ops.extend(
self._first_stage_box_predictor.get_updates_for(
self._first_stage_box_predictor.inputs))
if self._feature_extractor_for_box_classifier_features:
if (self._feature_extractor_for_box_classifier_features !=
_UNINITIALIZED_FEATURE_EXTRACTOR):
update_ops.extend(
self._feature_extractor_for_box_classifier_features.get_updates_for(
None))
update_ops.extend(
self._feature_extractor_for_box_classifier_features.get_updates_for(
self._feature_extractor_for_box_classifier_features.inputs))
if self._mask_rcnn_box_predictor:
if self._mask_rcnn_box_predictor.is_keras_model:
update_ops.extend(
self._mask_rcnn_box_predictor.get_updates_for(None))
update_ops.extend(
self._mask_rcnn_box_predictor.get_updates_for(
self._mask_rcnn_box_predictor.inputs))
return update_ops
| 144,173 | 47.592518 | 80 | py |
models | models-master/research/object_detection/meta_architectures/deepmac_meta_arch_test.py | """Tests for google3.third_party.tensorflow_models.object_detection.meta_architectures.deepmac_meta_arch."""
import functools
import math
import random
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.core import losses
from object_detection.core import preprocessor
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.meta_architectures import deepmac_meta_arch
from object_detection.protos import center_net_pb2
from object_detection.utils import tf_version
def _logit(probability):
return math.log(probability / (1. - probability))
LOGIT_HALF = _logit(0.5)
LOGIT_QUARTER = _logit(0.25)
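# _logit is the inverse of the sigmoid, so sigmoid(LOGIT_HALF) == 0.5 and
# sigmoid(LOGIT_QUARTER) == 0.25.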
class DummyFeatureExtractor(center_net_meta_arch.CenterNetFeatureExtractor):
def __init__(self,
channel_means,
channel_stds,
bgr_ordering,
num_feature_outputs,
stride):
self._num_feature_outputs = num_feature_outputs
self._stride = stride
super(DummyFeatureExtractor, self).__init__(
channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def predict(self):
pass
def loss(self):
pass
def postprocess(self):
pass
def call(self, inputs):
batch_size, input_height, input_width, _ = inputs.shape
fake_output = tf.ones([
batch_size, input_height // self._stride, input_width // self._stride,
64
], dtype=tf.float32)
return [fake_output] * self._num_feature_outputs
@property
def out_stride(self):
return self._stride
@property
def num_feature_outputs(self):
return self._num_feature_outputs
class MockMaskNet(tf.keras.layers.Layer):
def __call__(self, instance_embedding, pixel_embedding, training):
return tf.zeros_like(pixel_embedding[:, :, :, 0]) + 0.9
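# MockMaskNet ignores its inputs and predicts a constant logit of 0.9 for
# every pixel, which keeps expected values in the loss tests easy to compute.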
def build_meta_arch(**override_params):
"""Builds the DeepMAC meta architecture."""
params = dict(
predict_full_resolution_masks=False,
use_instance_embedding=True,
mask_num_subsamples=-1,
network_type='hourglass10',
use_xy=True,
pixel_embedding_dim=2,
dice_loss_prediction_probability=False,
feature_consistency_threshold=0.5,
use_dice_loss=False,
box_consistency_loss_normalize='normalize_auto',
box_consistency_tightness=False,
task_loss_weight=1.0,
feature_consistency_loss_weight=1.0,
box_consistency_loss_weight=1.0,
num_init_channels=8,
dim=8,
allowed_masked_classes_ids=[],
mask_size=16,
postprocess_crop_size=128,
max_roi_jitter_ratio=0.0,
roi_jitter_mode='default',
feature_consistency_dilation=2,
feature_consistency_warmup_steps=0,
feature_consistency_warmup_start=0,
use_only_last_stage=True,
augmented_self_supervision_max_translation=0.0,
augmented_self_supervision_loss_weight=0.0,
augmented_self_supervision_flip_probability=0.0,
augmented_self_supervision_warmup_start=0,
augmented_self_supervision_warmup_steps=0,
augmented_self_supervision_loss='loss_dice',
augmented_self_supervision_scale_min=1.0,
augmented_self_supervision_scale_max=1.0,
pointly_supervised_keypoint_loss_weight=1.0,
ignore_per_class_box_overlap=False,
feature_consistency_type='consistency_default_lab',
feature_consistency_comparison='comparison_default_gaussian')
params.update(override_params)
feature_extractor = DummyFeatureExtractor(
channel_means=(1.0, 2.0, 3.0),
channel_stds=(10., 20., 30.),
bgr_ordering=False,
num_feature_outputs=2,
stride=4)
image_resizer_fn = functools.partial(
preprocessor.resize_to_range,
min_dimension=128,
max_dimension=128,
      pad_to_max_dimension=True)
object_center_params = center_net_meta_arch.ObjectCenterParams(
classification_loss=losses.WeightedSigmoidClassificationLoss(),
object_center_loss_weight=1.0,
min_box_overlap_iou=1.0,
max_box_predictions=5,
use_labeled_classes=False)
use_dice_loss = params.pop('use_dice_loss')
dice_loss_prediction_prob = params.pop('dice_loss_prediction_probability')
if use_dice_loss:
classification_loss = losses.WeightedDiceClassificationLoss(
squared_normalization=False,
is_prediction_probability=dice_loss_prediction_prob)
else:
classification_loss = losses.WeightedSigmoidClassificationLoss()
deepmac_params = deepmac_meta_arch.DeepMACParams(
classification_loss=classification_loss,
**params
)
object_detection_params = center_net_meta_arch.ObjectDetectionParams(
localization_loss=losses.L1LocalizationLoss(),
offset_loss_weight=1.0,
scale_loss_weight=0.1
)
return deepmac_meta_arch.DeepMACMetaArch(
is_training=True,
add_summaries=False,
num_classes=6,
feature_extractor=feature_extractor,
object_center_params=object_center_params,
deepmac_params=deepmac_params,
object_detection_params=object_detection_params,
image_resizer_fn=image_resizer_fn)
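# Config text used by test_proto_parse below to check that
# deepmac_proto_to_params carries the configured fields over into
# DeepMACParams.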
DEEPMAC_PROTO_TEXT = """
dim: 153
task_loss_weight: 5.0
pixel_embedding_dim: 8
use_xy: false
use_instance_embedding: false
network_type: "cond_inst3"
classification_loss {
weighted_dice_classification_loss {
squared_normalization: false
is_prediction_probability: false
}
}
jitter_mode: EXPAND_SYMMETRIC_XY
max_roi_jitter_ratio: 0.0
predict_full_resolution_masks: true
allowed_masked_classes_ids: [99]
box_consistency_loss_weight: 1.0
feature_consistency_loss_weight: 1.0
feature_consistency_threshold: 0.1
box_consistency_tightness: false
box_consistency_loss_normalize: NORMALIZE_AUTO
feature_consistency_warmup_steps: 20
feature_consistency_warmup_start: 10
use_only_last_stage: false
augmented_self_supervision_warmup_start: 13
augmented_self_supervision_warmup_steps: 14
augmented_self_supervision_loss: LOSS_MSE
augmented_self_supervision_loss_weight: 11.0
augmented_self_supervision_max_translation: 2.5
augmented_self_supervision_flip_probability: 0.9
augmented_self_supervision_scale_min: 0.42
augmented_self_supervision_scale_max: 1.42
pointly_supervised_keypoint_loss_weight: 0.13
ignore_per_class_box_overlap: true
feature_consistency_type: CONSISTENCY_FEATURE_MAP
feature_consistency_comparison: COMPARISON_NORMALIZED_DOTPROD
"""
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class DeepMACUtilsTest(tf.test.TestCase, parameterized.TestCase):
def test_proto_parse(self):
proto = center_net_pb2.CenterNet().DeepMACMaskEstimation()
text_format.Parse(DEEPMAC_PROTO_TEXT, proto)
params = deepmac_meta_arch.deepmac_proto_to_params(proto)
self.assertIsInstance(params, deepmac_meta_arch.DeepMACParams)
self.assertEqual(params.num_init_channels, 64)
self.assertEqual(params.dim, 153)
self.assertEqual(params.box_consistency_loss_normalize, 'normalize_auto')
self.assertFalse(params.use_only_last_stage)
self.assertEqual(params.augmented_self_supervision_warmup_start, 13)
self.assertEqual(params.augmented_self_supervision_warmup_steps, 14)
self.assertEqual(params.augmented_self_supervision_loss, 'loss_mse')
self.assertEqual(params.augmented_self_supervision_loss_weight, 11.0)
self.assertEqual(params.augmented_self_supervision_max_translation, 2.5)
self.assertAlmostEqual(
params.augmented_self_supervision_flip_probability, 0.9)
self.assertAlmostEqual(
params.augmented_self_supervision_scale_min, 0.42)
self.assertAlmostEqual(
params.augmented_self_supervision_scale_max, 1.42)
self.assertAlmostEqual(
params.pointly_supervised_keypoint_loss_weight, 0.13)
self.assertTrue(params.ignore_per_class_box_overlap)
self.assertEqual(params.feature_consistency_type, 'consistency_feature_map')
self.assertEqual(
params.feature_consistency_comparison, 'comparison_normalized_dotprod')
def test_subsample_trivial(self):
"""Test subsampling masks."""
boxes = np.arange(4).reshape(4, 1) * np.ones((4, 4))
masks = np.arange(4).reshape(4, 1, 1) * np.ones((4, 32, 32))
weights = np.ones(4)
classes = tf.one_hot(tf.range(4), depth=4)
result = deepmac_meta_arch.subsample_instances(
classes, weights, boxes, masks, 4)
self.assertAllClose(result[0], classes)
self.assertAllClose(result[1], weights)
self.assertAllClose(result[2], boxes)
self.assertAllClose(result[3], masks)
def test_filter_masked_classes(self):
classes = np.zeros((2, 3, 5), dtype=np.float32)
classes[0, 0] = [1.0, 0.0, 0.0, 0.0, 0.0]
classes[0, 1] = [0.0, 1.0, 0.0, 0.0, 0.0]
classes[0, 2] = [0.0, 0.0, 1.0, 0.0, 0.0]
classes[1, 0] = [0.0, 0.0, 0.0, 1.0, 0.0]
classes[1, 1] = [0.0, 0.0, 0.0, 0.0, 1.0]
classes[1, 2] = [0.0, 0.0, 0.0, 0.0, 1.0]
classes = tf.constant(classes)
weights = tf.constant([[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]])
masks = tf.ones((2, 3, 32, 32), dtype=tf.float32)
classes, weights, masks = deepmac_meta_arch.filter_masked_classes(
[3, 4], classes, weights, masks)
expected_classes = np.zeros((2, 3, 5))
expected_classes[0, 0] = [0.0, 0.0, 0.0, 0.0, 0.0]
expected_classes[0, 1] = [0.0, 0.0, 0.0, 0.0, 0.0]
expected_classes[0, 2] = [0.0, 0.0, 1.0, 0.0, 0.0]
expected_classes[1, 0] = [0.0, 0.0, 0.0, 1.0, 0.0]
expected_classes[1, 1] = [0.0, 0.0, 0.0, 0.0, 0.0]
expected_classes[1, 2] = [0.0, 0.0, 0.0, 0.0, 0.0]
self.assertAllClose(expected_classes, classes.numpy())
self.assertAllClose(np.array(([0.0, 0.0, 1.0], [1.0, 0.0, 0.0])), weights)
self.assertAllClose(masks[0, 0], np.zeros((32, 32)))
self.assertAllClose(masks[0, 1], np.zeros((32, 32)))
self.assertAllClose(masks[0, 2], np.ones((32, 32)))
self.assertAllClose(masks[1, 0], np.ones((32, 32)))
self.assertAllClose(masks[1, 1], np.zeros((32, 32)))
def test_fill_boxes(self):
boxes = tf.constant([[[0., 0., 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]],
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]])
filled_boxes = deepmac_meta_arch.fill_boxes(boxes, 32, 32)
expected = np.zeros((2, 2, 32, 32))
expected[0, 0, :17, :17] = 1.0
expected[0, 1, 16:, 16:] = 1.0
expected[1, 0, :, :] = 1.0
filled_boxes = filled_boxes.numpy()
self.assertAllClose(expected[0, 0], filled_boxes[0, 0], rtol=1e-3)
self.assertAllClose(expected[0, 1], filled_boxes[0, 1], rtol=1e-3)
self.assertAllClose(expected[1, 0], filled_boxes[1, 0], rtol=1e-3)
def test_flatten_and_unpack(self):
t = tf.random.uniform((2, 3, 4, 5, 6))
flatten = tf.function(deepmac_meta_arch.flatten_first2_dims)
unpack = tf.function(deepmac_meta_arch.unpack_first2_dims)
result, d1, d2 = flatten(t)
result = unpack(result, d1, d2)
self.assertAllClose(result.numpy(), t)
def test_crop_and_resize_instance_masks(self):
boxes = tf.zeros((8, 5, 4))
masks = tf.zeros((8, 5, 128, 128))
output = deepmac_meta_arch.crop_and_resize_instance_masks(
masks, boxes, 32)
self.assertEqual(output.shape, (8, 5, 32, 32))
def test_embedding_projection_prob_shape(self):
dist = deepmac_meta_arch.embedding_projection(
tf.ones((4, 32, 32, 8)), tf.zeros((4, 32, 32, 8)))
self.assertEqual(dist.shape, (4, 32, 32, 1))
@parameterized.parameters([1e-20, 1e20])
def test_embedding_projection_value(self, value):
dist = deepmac_meta_arch.embedding_projection(
tf.zeros((1, 1, 1, 8)), value + tf.zeros((1, 1, 1, 8))).numpy()
max_float = np.finfo(dist.dtype).max
self.assertLess(dist.max(), max_float)
self.assertGreater(dist.max(), -max_float)
@parameterized.named_parameters(
[('no_conv_shortcut', (False,)),
('conv_shortcut', (True,))]
)
def test_res_dense_block(self, conv_shortcut):
net = deepmac_meta_arch.DenseResidualBlock(32, conv_shortcut)
out = net(tf.zeros((2, 32)))
self.assertEqual(out.shape, (2, 32))
@parameterized.parameters(
[4, 8, 20]
)
def test_dense_resnet(self, num_layers):
net = deepmac_meta_arch.DenseResNet(num_layers, 16, 8)
out = net(tf.zeros((2, 24)))
self.assertEqual(out.shape, (2, 8))
def test_generate_2d_neighbors_shape(self):
inp = tf.zeros((5, 13, 14, 3))
out = deepmac_meta_arch.generate_2d_neighbors(inp)
self.assertEqual((8, 5, 13, 14, 3), out.shape)
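  # generate_2d_neighbors returns, for every pixel, the feature values of its
  # 8 neighbors at the given dilation, stacked along a new leading axis.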
def test_generate_2d_neighbors(self):
inp = np.arange(16).reshape(4, 4).astype(np.float32)
inp = tf.stack([inp, inp * 2], axis=2)
inp = tf.reshape(inp, (1, 4, 4, 2))
out = deepmac_meta_arch.generate_2d_neighbors(inp, dilation=1)
self.assertEqual((8, 1, 4, 4, 2), out.shape)
for i in range(2):
expected = np.array([0, 1, 2, 4, 6, 8, 9, 10]) * (i + 1)
self.assertAllEqual(out[:, 0, 1, 1, i], expected)
expected = np.array([1, 2, 3, 5, 7, 9, 10, 11]) * (i + 1)
self.assertAllEqual(out[:, 0, 1, 2, i], expected)
expected = np.array([4, 5, 6, 8, 10, 12, 13, 14]) * (i + 1)
self.assertAllEqual(out[:, 0, 2, 1, i], expected)
expected = np.array([5, 6, 7, 9, 11, 13, 14, 15]) * (i + 1)
self.assertAllEqual(out[:, 0, 2, 2, i], expected)
def test_generate_2d_neighbors_dilation2(self):
inp = np.arange(16).reshape(1, 4, 4, 1).astype(np.float32)
out = deepmac_meta_arch.generate_2d_neighbors(inp, dilation=2)
self.assertEqual((8, 1, 4, 4, 1), out.shape)
expected = np.array([0, 0, 0, 0, 2, 0, 8, 10])
self.assertAllEqual(out[:, 0, 0, 0, 0], expected)
def test_dilated_similarity_shape(self):
fmap = tf.zeros((5, 32, 32, 9))
similarity = deepmac_meta_arch.dilated_cross_pixel_similarity(
fmap)
self.assertEqual((8, 5, 32, 32), similarity.shape)
def test_dilated_similarity(self):
fmap = np.zeros((1, 5, 5, 2), dtype=np.float32)
fmap[0, 0, 0, :] = 1.0
fmap[0, 4, 4, :] = 1.0
similarity = deepmac_meta_arch.dilated_cross_pixel_similarity(
fmap, theta=1.0, dilation=2)
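    # The feature vectors at (0, 0) and (2, 2) differ by (1, 1); with
    # theta=1.0 the expected similarity is exp(-||(1, 1)||) = exp(-sqrt(2)).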
self.assertAlmostEqual(similarity.numpy()[0, 0, 2, 2],
np.exp(-np.sqrt(2)))
def test_dilated_same_instance_mask_shape(self):
instances = tf.zeros((2, 5, 32, 32))
output = deepmac_meta_arch.dilated_cross_same_mask_label(instances)
self.assertEqual((8, 2, 5, 32, 32), output.shape)
def test_dilated_same_instance_mask(self):
instances = np.zeros((3, 2, 5, 5), dtype=np.float32)
instances[0, 0, 0, 0] = 1.0
instances[0, 0, 2, 2] = 1.0
instances[0, 0, 4, 4] = 1.0
instances[2, 0, 0, 0] = 1.0
instances[2, 0, 2, 2] = 1.0
instances[2, 0, 4, 4] = 0.0
output = deepmac_meta_arch.dilated_cross_same_mask_label(instances).numpy()
self.assertAllClose(np.ones((8, 2, 5, 5)), output[:, 1, :, :])
self.assertAllClose([1, 0, 0, 0, 0, 0, 0, 1], output[:, 0, 0, 2, 2])
self.assertAllClose([1, 0, 0, 0, 0, 0, 0, 0], output[:, 2, 0, 2, 2])
def test_per_pixel_single_conv_multiple_instance(self):
inp = tf.zeros((5, 32, 32, 7))
params = tf.zeros((5, 7*8 + 8))
out = deepmac_meta_arch._per_pixel_single_conv(inp, params, 8)
self.assertEqual(out.shape, (5, 32, 32, 8))
def test_per_pixel_conditional_conv_error(self):
with self.assertRaises(ValueError):
deepmac_meta_arch.per_pixel_conditional_conv(
tf.zeros((10, 32, 32, 8)), tf.zeros((10, 2)), 8, 3)
def test_per_pixel_conditional_conv_error_tf_func(self):
with self.assertRaises(ValueError):
func = tf.function(deepmac_meta_arch.per_pixel_conditional_conv)
func(tf.zeros((10, 32, 32, 8)), tf.zeros((10, 2)), 8, 3)
def test_per_pixel_conditional_conv_depth1_error(self):
with self.assertRaises(ValueError):
_ = deepmac_meta_arch.per_pixel_conditional_conv(
tf.zeros((10, 32, 32, 7)), tf.zeros((10, 8)), 99, 1)
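  # In the parameterized cases below, instance_embedding_dim must equal the
  # total number of weights and biases in the dynamic conv head, i.e. the sum
  # over layers of (in_channels * out_channels + out_channels) with a single
  # output channel in the last layer. E.g. depth=3, channels=8 and 10 input
  # channels gives (10*8 + 8) + (8*8 + 8) + (8*1 + 1) = 169.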
@parameterized.parameters([
{
'num_input_channels': 7,
'instance_embedding_dim': 8,
'channels': 7,
'depth': 1
},
{
'num_input_channels': 7,
'instance_embedding_dim': 82,
'channels': 9,
'depth': 2
},
{ # From https://arxiv.org/abs/2003.05664
'num_input_channels': 10,
'instance_embedding_dim': 169,
'channels': 8,
'depth': 3
},
{
'num_input_channels': 8,
'instance_embedding_dim': 433,
'channels': 16,
'depth': 3
},
{
'num_input_channels': 8,
'instance_embedding_dim': 1377,
'channels': 32,
'depth': 3
},
{
'num_input_channels': 8,
'instance_embedding_dim': 4801,
'channels': 64,
'depth': 3
},
])
def test_per_pixel_conditional_conv_shape(
self, num_input_channels, instance_embedding_dim, channels, depth):
out = deepmac_meta_arch.per_pixel_conditional_conv(
tf.zeros((10, 32, 32, num_input_channels)),
tf.zeros((10, instance_embedding_dim)), channels, depth)
self.assertEqual(out.shape, (10, 32, 32, 1))
def test_per_pixel_conditional_conv_value_depth1(self):
input_tensor = tf.constant(np.array([1, 2, 3]))
input_tensor = tf.reshape(input_tensor, (1, 1, 1, 3))
instance_embedding = tf.constant(
np.array([1, 10, 100, 1000]))
instance_embedding = tf.reshape(instance_embedding, (1, 4))
out = deepmac_meta_arch.per_pixel_conditional_conv(
input_tensor, instance_embedding, channels=3, depth=1)
expected_output = np.array([1321])
expected_output = np.reshape(expected_output, (1, 1, 1, 1))
self.assertAllClose(expected_output, out)
def test_per_pixel_conditional_conv_value_depth2_single(self):
input_tensor = tf.constant(np.array([2]))
input_tensor = tf.reshape(input_tensor, (1, 1, 1, 1))
instance_embedding = tf.constant(
np.array([-2, 3, 100, 5]))
instance_embedding = tf.reshape(instance_embedding, (1, 4))
out = deepmac_meta_arch.per_pixel_conditional_conv(
input_tensor, instance_embedding, channels=1, depth=2)
expected_output = np.array([5])
expected_output = np.reshape(expected_output, (1, 1, 1, 1))
self.assertAllClose(expected_output, out)
def test_per_pixel_conditional_conv_value_depth2_identity(self):
input_tensor = tf.constant(np.array([1, 2]))
input_tensor = tf.reshape(input_tensor, (1, 1, 1, 2))
instance_embedding = tf.constant(
np.array([1, 0, 0, 1, 1, -3, 5, 100, -9]))
instance_embedding = tf.reshape(
instance_embedding, (1, 9))
out = deepmac_meta_arch.per_pixel_conditional_conv(
input_tensor, instance_embedding, channels=2, depth=2)
expected_output = np.array([1])
expected_output = np.reshape(expected_output, (1, 1, 1, 1))
self.assertAllClose(expected_output, out)
def test_per_instance_no_class_overlap(self):
boxes = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.4, 0.4]],
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]],
dtype=tf.float32)
classes = tf.constant([[[0, 1, 0], [0, 1, 0]], [[0, 1, 0], [1, 0, 0]]],
dtype=tf.float32)
output = deepmac_meta_arch.per_instance_no_class_overlap(
classes, boxes, 2, 2)
self.assertEqual(output.shape, (2, 2, 2, 2))
self.assertAllClose(output[1], np.ones((2, 2, 2)))
self.assertAllClose(output[0, 1], [[0., 1.0], [1.0, 1.0]])
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class DeepMACMaskHeadTest(tf.test.TestCase, parameterized.TestCase):
def test_mask_network_params_resnet4(self):
net = deepmac_meta_arch.MaskHeadNetwork('resnet4', num_init_channels=8)
_ = net(tf.zeros((2, 16)), tf.zeros((2, 32, 32, 16)), training=True)
trainable_params = tf.reduce_sum([tf.reduce_prod(tf.shape(w)) for w in
net.trainable_weights])
self.assertEqual(trainable_params.numpy(), 8665)
def test_mask_network_embedding_projection_small(self):
net = deepmac_meta_arch.MaskHeadNetwork(
'embedding_projection', num_init_channels=-1,
use_instance_embedding=False)
call_func = tf.function(net.__call__)
out = call_func(1e6 + tf.zeros((2, 7)),
tf.zeros((2, 32, 32, 7)), training=True)
self.assertEqual(out.shape, (2, 32, 32))
self.assertAllGreater(out.numpy(), -np.inf)
self.assertAllLess(out.numpy(), np.inf)
@parameterized.parameters([
{
'mask_net': 'resnet4',
'mask_net_channels': 8,
'instance_embedding_dim': 4,
'input_channels': 16,
'use_instance_embedding': False
},
{
'mask_net': 'hourglass10',
'mask_net_channels': 8,
'instance_embedding_dim': 4,
'input_channels': 16,
'use_instance_embedding': False
},
{
'mask_net': 'hourglass20',
'mask_net_channels': 8,
'instance_embedding_dim': 4,
'input_channels': 16,
'use_instance_embedding': False
},
{
'mask_net': 'cond_inst3',
'mask_net_channels': 8,
'instance_embedding_dim': 153,
'input_channels': 8,
'use_instance_embedding': False
},
{
'mask_net': 'cond_inst3',
'mask_net_channels': 8,
'instance_embedding_dim': 169,
'input_channels': 10,
'use_instance_embedding': False
},
{
'mask_net': 'cond_inst1',
'mask_net_channels': 8,
'instance_embedding_dim': 9,
'input_channels': 8,
'use_instance_embedding': False
},
{
'mask_net': 'cond_inst2',
'mask_net_channels': 8,
'instance_embedding_dim': 81,
'input_channels': 8,
'use_instance_embedding': False
},
])
def test_mask_network(self, mask_net, mask_net_channels,
instance_embedding_dim, input_channels,
use_instance_embedding):
net = deepmac_meta_arch.MaskHeadNetwork(
mask_net, num_init_channels=mask_net_channels,
use_instance_embedding=use_instance_embedding)
call_func = tf.function(net.__call__)
out = call_func(tf.zeros((2, instance_embedding_dim)),
tf.zeros((2, 32, 32, input_channels)), training=True)
self.assertEqual(out.shape, (2, 32, 32))
self.assertAllGreater(out.numpy(), -np.inf)
self.assertAllLess(out.numpy(), np.inf)
out = call_func(tf.zeros((2, instance_embedding_dim)),
tf.zeros((2, 32, 32, input_channels)), training=True)
self.assertEqual(out.shape, (2, 32, 32))
out = call_func(tf.zeros((0, instance_embedding_dim)),
tf.zeros((0, 32, 32, input_channels)), training=True)
self.assertEqual(out.shape, (0, 32, 32))
@parameterized.parameters(
[
dict(x=4, y=4, height=4, width=4),
dict(x=1, y=2, height=3, width=4),
dict(x=14, y=14, height=5, width=5),
]
)
def test_transform_images_and_boxes_identity(self, x, y, height, width):
images = np.zeros((1, 32, 32, 3), dtype=np.float32)
images[:, y:y + height, x:x + width, :] = 1.0
boxes = tf.constant([[[y / 32., x / 32.,
y / 32. + height/32, x/32. + width / 32]]])
zeros = tf.zeros(1)
ones = tf.ones(1)
falses = tf.zeros(1, dtype=tf.bool)
images = tf.constant(images)
images_out, boxes_out = deepmac_meta_arch.transform_images_and_boxes(
images, boxes, zeros, zeros, ones, ones, falses)
self.assertAllClose(images, images_out)
self.assertAllClose(boxes, boxes_out)
coords = np.argwhere(images_out.numpy()[0, :, :, 0] > 0.5)
self.assertEqual(np.min(coords[:, 0]), y)
self.assertEqual(np.min(coords[:, 1]), x)
self.assertEqual(np.max(coords[:, 0]), y + height - 1)
self.assertEqual(np.max(coords[:, 1]), x + width - 1)
def test_transform_images_and_boxes(self):
images = np.zeros((2, 32, 32, 3), dtype=np.float32)
images[:, 14:19, 14:19, :] = 1.0
boxes = tf.constant(
[[[14.0 / 32, 14.0 / 32, 18.0 / 32, 18.0 / 32]] * 2] * 2)
flip = tf.constant([False, False])
scale_y0 = 2.0
translate_y0 = 1.0
scale_x0 = 4.0
translate_x0 = 4.0
scale_y1 = 3.0
translate_y1 = 3.0
scale_x1 = 0.5
translate_x1 = 2.0
ty = tf.constant([translate_y0/32, translate_y1/32])
sy = tf.constant([1./scale_y0, 1.0 / scale_y1])
tx = tf.constant([translate_x0/32, translate_x1/32])
sx = tf.constant([1 / scale_x0, 1.0 / scale_x1])
images = tf.constant(images)
images_out, boxes_out = deepmac_meta_arch.transform_images_and_boxes(
images, boxes, tx=tx, ty=ty, sx=sx, sy=sy, flip=flip)
boxes_out = boxes_out.numpy() * 32
coords = np.argwhere(images_out[0, :, :, 0] >= 0.9)
ymin = np.min(coords[:, 0])
ymax = np.max(coords[:, 0])
xmin = np.min(coords[:, 1])
xmax = np.max(coords[:, 1])
self.assertAlmostEqual(
ymin, 16 - 2*scale_y0 + translate_y0, delta=1)
self.assertAlmostEqual(
ymax, 16 + 2*scale_y0 + translate_y0, delta=1)
self.assertAlmostEqual(
xmin, 16 - 2*scale_x0 + translate_x0, delta=1)
self.assertAlmostEqual(
xmax, 16 + 2*scale_x0 + translate_x0, delta=1)
self.assertAlmostEqual(ymin, boxes_out[0, 0, 0], delta=1)
self.assertAlmostEqual(xmin, boxes_out[0, 0, 1], delta=1)
self.assertAlmostEqual(ymax, boxes_out[0, 0, 2], delta=1)
self.assertAlmostEqual(xmax, boxes_out[0, 0, 3], delta=1)
coords = np.argwhere(images_out[1, :, :, 0] >= 0.9)
ymin = np.min(coords[:, 0])
ymax = np.max(coords[:, 0])
xmin = np.min(coords[:, 1])
xmax = np.max(coords[:, 1])
self.assertAlmostEqual(
ymin, 16 - 2*scale_y1 + translate_y1, delta=1)
self.assertAlmostEqual(
ymax, 16 + 2*scale_y1 + translate_y1, delta=1)
self.assertAlmostEqual(
xmin, 16 - 2*scale_x1 + translate_x1, delta=1)
self.assertAlmostEqual(
xmax, 16 + 2*scale_x1 + translate_x1, delta=1)
self.assertAlmostEqual(ymin, boxes_out[1, 0, 0], delta=1)
self.assertAlmostEqual(xmin, boxes_out[1, 0, 1], delta=1)
self.assertAlmostEqual(ymax, boxes_out[1, 0, 2], delta=1)
self.assertAlmostEqual(xmax, boxes_out[1, 0, 3], delta=1)
def test_transform_images_and_boxes_flip(self):
images = np.zeros((2, 2, 2, 1), dtype=np.float32)
images[0, :, :, 0] = [[1, 2], [3, 4]]
images[1, :, :, 0] = [[1, 2], [3, 4]]
images = tf.constant(images)
boxes = tf.constant(
[[[0.1, 0.2, 0.3, 0.4]], [[0.1, 0.2, 0.3, 0.4]]], dtype=tf.float32)
tx = ty = tf.zeros([2], dtype=tf.float32)
sx = sy = tf.ones([2], dtype=tf.float32)
flip = tf.constant([True, False])
output_images, output_boxes = deepmac_meta_arch.transform_images_and_boxes(
images, boxes, tx, ty, sx, sy, flip)
expected_images = np.zeros((2, 2, 2, 1), dtype=np.float32)
expected_images[0, :, :, 0] = [[2, 1], [4, 3]]
expected_images[1, :, :, 0] = [[1, 2], [3, 4]]
self.assertAllClose(output_boxes,
[[[0.1, 0.6, 0.3, 0.8]], [[0.1, 0.2, 0.3, 0.4]]])
self.assertAllClose(expected_images, output_images)
def test_transform_images_and_boxes_tf_function(self):
func = tf.function(deepmac_meta_arch.transform_images_and_boxes)
output, _ = func(images=tf.zeros((2, 32, 32, 3)), boxes=tf.zeros((2, 5, 4)),
tx=tf.zeros(2), ty=tf.zeros(2),
sx=tf.ones(2), sy=tf.ones(2),
flip=tf.zeros(2, dtype=tf.bool))
self.assertEqual(output.shape, (2, 32, 32, 3))
def test_transform_instance_masks(self):
instance_masks = np.zeros((2, 10, 32, 32), dtype=np.float32)
instance_masks[0, 0, 1, 1] = 1
instance_masks[0, 1, 1, 1] = 1
instance_masks[1, 0, 2, 2] = 1
instance_masks[1, 1, 2, 2] = 1
tx = ty = tf.constant([1., 2.]) / 32.0
sx = sy = tf.ones(2, dtype=tf.float32)
flip = tf.zeros(2, dtype=tf.bool)
instance_masks = deepmac_meta_arch.transform_instance_masks(
instance_masks, tx, ty, sx, sy, flip=flip)
self.assertEqual(instance_masks.shape, (2, 10, 32, 32))
self.assertAlmostEqual(
instance_masks[0].numpy().sum(), 2.0)
self.assertGreater(
instance_masks[0, 0, 2, 2].numpy(), 0.5)
self.assertGreater(
instance_masks[0, 1, 2, 2].numpy(), 0.5)
self.assertAlmostEqual(
instance_masks[1].numpy().sum(), 2.0)
self.assertGreater(
instance_masks[1, 0, 4, 4].numpy(), 0.5)
self.assertGreater(
instance_masks[1, 1, 4, 4].numpy(), 0.5)
def test_augment_image_and_deaugment_mask(self):
img = np.zeros((1, 32, 32, 3), dtype=np.float32)
img[0, 10:12, 10:12, :] = 1.0
tx = ty = tf.constant([1.]) / 32.0
sx = sy = tf.constant([1.0 / 2.0])
flip = tf.constant([False])
img = tf.constant(img)
img_t, _ = deepmac_meta_arch.transform_images_and_boxes(
images=img, boxes=None, tx=tx, ty=ty, sx=sx, sy=sy, flip=flip)
self.assertAlmostEqual(img_t.numpy().sum(), 16 * 3)
# Converting channels of the image to instances.
masks = tf.transpose(img_t, (0, 3, 1, 2))
masks_t = deepmac_meta_arch.transform_instance_masks(
masks, tx=-tx, ty=-ty, sx=1.0/sx, sy=1.0/sy, flip=flip)
self.assertAlmostEqual(masks_t.numpy().sum(), 4 * 3)
coords = np.argwhere(masks_t[0, 0, :, :] >= 0.5)
self.assertAlmostEqual(np.min(coords[:, 0]), 10, delta=1)
self.assertAlmostEqual(np.max(coords[:, 0]), 12, delta=1)
self.assertAlmostEqual(np.min(coords[:, 1]), 10, delta=1)
self.assertAlmostEqual(np.max(coords[:, 1]), 12, delta=1)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class DeepMACMetaArchTest(tf.test.TestCase, parameterized.TestCase):
# TODO(vighneshb): Add batch_size > 1 tests for loss functions.
def setUp(self): # pylint:disable=g-missing-super-call
self.model = build_meta_arch()
def test_get_mask_head_input(self):
boxes = tf.constant([[[0., 0., 0.25, 0.25], [0.75, 0.75, 1.0, 1.0]],
[[0., 0., 0.25, 0.25], [0.75, 0.75, 1.0, 1.0]]],
dtype=tf.float32)
pixel_embedding = np.zeros((2, 32, 32, 4), dtype=np.float32)
pixel_embedding[0, :16, :16] = 1.0
pixel_embedding[0, 16:, 16:] = 2.0
pixel_embedding[1, :16, :16] = 3.0
pixel_embedding[1, 16:, 16:] = 4.0
pixel_embedding = tf.constant(pixel_embedding)
mask_inputs = self.model._get_mask_head_input(boxes, pixel_embedding)
self.assertEqual(mask_inputs.shape, (2, 2, 16, 16, 6))
y_grid, x_grid = tf.meshgrid(np.linspace(-1.0, 1.0, 16),
np.linspace(-1.0, 1.0, 16), indexing='ij')
for i, j in ([0, 0], [0, 1], [1, 0], [1, 1]):
self.assertAllClose(y_grid, mask_inputs[i, j, :, :, 0])
self.assertAllClose(x_grid, mask_inputs[i, j, :, :, 1])
zeros = np.zeros((16, 16, 4))
self.assertAllClose(zeros + 1, mask_inputs[0, 0, :, :, 2:])
self.assertAllClose(zeros + 2, mask_inputs[0, 1, :, :, 2:])
self.assertAllClose(zeros + 3, mask_inputs[1, 0, :, :, 2:])
self.assertAllClose(zeros + 4, mask_inputs[1, 1, :, :, 2:])
def test_get_mask_head_input_no_crop_resize(self):
model = build_meta_arch(predict_full_resolution_masks=True)
boxes = tf.constant([[[0., 0., 1.0, 1.0], [0.0, 0.0, 0.5, 1.0]],
[[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]])
pixel_embedding_np = np.random.randn(2, 32, 32, 4).astype(np.float32)
pixel_embedding = tf.constant(pixel_embedding_np)
mask_inputs = model._get_mask_head_input(boxes, pixel_embedding)
self.assertEqual(mask_inputs.shape, (2, 2, 32, 32, 6))
y_grid, x_grid = tf.meshgrid(np.linspace(.0, 1.0, 32),
np.linspace(.0, 1.0, 32), indexing='ij')
self.assertAllClose(y_grid - 0.5, mask_inputs[0, 0, :, :, 0])
self.assertAllClose(x_grid - 0.5, mask_inputs[0, 0, :, :, 1])
self.assertAllClose(y_grid - 0.25, mask_inputs[0, 1, :, :, 0])
self.assertAllClose(x_grid - 0.5, mask_inputs[0, 1, :, :, 1])
self.assertAllClose(y_grid - 0.75, mask_inputs[1, 0, :, :, 0])
self.assertAllClose(x_grid - 0.75, mask_inputs[1, 0, :, :, 1])
self.assertAllClose(y_grid, mask_inputs[1, 1, :, :, 0])
self.assertAllClose(x_grid, mask_inputs[1, 1, :, :, 1])
def test_get_instance_embeddings(self):
embeddings = np.zeros((2, 32, 32, 2))
embeddings[0, 8, 8] = 1.0
embeddings[0, 24, 16] = 2.0
embeddings[1, 8, 16] = 3.0
embeddings = tf.constant(embeddings)
boxes = np.zeros((2, 2, 4), dtype=np.float32)
boxes[0, 0] = [0.0, 0.0, 0.5, 0.5]
boxes[0, 1] = [0.5, 0.0, 1.0, 1.0]
boxes[1, 0] = [0.0, 0.0, 0.5, 1.0]
boxes = tf.constant(boxes)
center_embeddings = self.model._get_instance_embeddings(boxes, embeddings)
self.assertAllClose(center_embeddings[0, 0], [1.0, 1.0])
self.assertAllClose(center_embeddings[0, 1], [2.0, 2.0])
self.assertAllClose(center_embeddings[1, 0], [3.0, 3.0])
def test_get_groundtruth_mask_output(self):
boxes = np.zeros((2, 2, 4))
masks = np.zeros((2, 2, 32, 32))
boxes[0, 0] = [0.0, 0.0, 0.25, 0.25]
boxes[0, 1] = [0.75, 0.75, 1.0, 1.0]
boxes[1, 0] = [0.0, 0.0, 0.5, 1.0]
masks = np.zeros((2, 2, 32, 32), dtype=np.float32)
masks[0, 0, :16, :16] = 0.5
masks[0, 1, 16:, 16:] = 0.1
masks[1, 0, :17, :] = 0.3
masks = self.model._get_groundtruth_mask_output(boxes, masks)
self.assertEqual(masks.shape, (2, 2, 16, 16))
self.assertAllClose(masks[0, 0], np.zeros((16, 16)) + 0.5)
self.assertAllClose(masks[0, 1], np.zeros((16, 16)) + 0.1)
self.assertAllClose(masks[1, 0], np.zeros((16, 16)) + 0.3)
def test_get_groundtruth_mask_output_no_crop_resize(self):
model = build_meta_arch(predict_full_resolution_masks=True)
boxes = tf.zeros((2, 5, 4))
masks = tf.ones((2, 5, 32, 32))
masks = model._get_groundtruth_mask_output(boxes, masks)
self.assertAllClose(masks, np.ones((2, 5, 32, 32)))
def test_predict(self):
tf.keras.backend.set_learning_phase(True)
self.model.provide_groundtruth(
groundtruth_boxes_list=[tf.convert_to_tensor([[0., 0., 1., 1.]] * 5)],
groundtruth_classes_list=[tf.one_hot([1, 0, 1, 1, 1], depth=6)],
groundtruth_weights_list=[tf.ones(5)],
groundtruth_masks_list=[tf.ones((5, 32, 32))])
prediction = self.model.predict(tf.zeros((1, 32, 32, 3)), None)
self.assertEqual(prediction['MASK_LOGITS_GT_BOXES'][0].shape,
(1, 5, 16, 16))
def test_predict_self_supervised_deaugmented_mask_logits(self):
tf.keras.backend.set_learning_phase(True)
model = build_meta_arch(
augmented_self_supervision_loss_weight=1.0,
predict_full_resolution_masks=True)
model.provide_groundtruth(
groundtruth_boxes_list=[tf.convert_to_tensor([[0., 0., 1., 1.]] * 5)],
groundtruth_classes_list=[tf.one_hot([1, 0, 1, 1, 1], depth=6)],
groundtruth_weights_list=[tf.ones(5)],
groundtruth_masks_list=[tf.ones((5, 32, 32))])
prediction = model.predict(tf.zeros((1, 32, 32, 3)), None)
self.assertEqual(prediction['MASK_LOGITS_GT_BOXES'][0].shape,
(1, 5, 8, 8))
self.assertEqual(
prediction['SELF_SUPERVISED_DEAUGMENTED_MASK_LOGITS'][0].shape,
(1, 5, 8, 8))
def test_loss(self):
model = build_meta_arch()
boxes = tf.constant([[[0.0, 0.0, 0.25, 0.25], [0.75, 0.75, 1.0, 1.0]]])
masks = np.zeros((1, 2, 32, 32), dtype=np.float32)
masks[0, 0, :16, :16] = 1.0
masks[0, 1, 16:, 16:] = 1.0
masks_pred = tf.fill((1, 2, 32, 32), 0.9)
classes = tf.zeros((1, 2, 5))
loss_dict = model._compute_deepmac_losses(
boxes, masks_pred, masks, classes, tf.zeros((1, 16, 16, 3)))
self.assertAllClose(
loss_dict[deepmac_meta_arch.DEEP_MASK_ESTIMATION],
np.zeros((1, 2)) - tf.math.log(tf.nn.sigmoid(0.9)))
def test_loss_no_crop_resize(self):
model = build_meta_arch(predict_full_resolution_masks=True)
boxes = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
masks = tf.ones((1, 2, 128, 128), dtype=tf.float32)
masks_pred = tf.fill((1, 2, 32, 32), 0.9)
classes = tf.zeros((1, 2, 5))
loss_dict = model._compute_deepmac_losses(
boxes, masks_pred, masks, classes, tf.zeros((1, 32, 32, 3)))
self.assertAllClose(
loss_dict[deepmac_meta_arch.DEEP_MASK_ESTIMATION],
np.zeros((1, 2)) - tf.math.log(tf.nn.sigmoid(0.9)))
def test_loss_no_crop_resize_dice(self):
model = build_meta_arch(predict_full_resolution_masks=True,
use_dice_loss=True)
boxes = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
masks = np.ones((1, 2, 128, 128), dtype=np.float32)
masks = tf.constant(masks)
masks_pred = tf.fill((1, 2, 32, 32), 0.9)
classes = tf.zeros((1, 2, 5))
loss_dict = model._compute_deepmac_losses(
boxes, masks_pred, masks, classes, tf.zeros((1, 32, 32, 3)))
pred = tf.nn.sigmoid(0.9)
expected = (1.0 - ((2.0 * pred) / (1.0 + pred)))
self.assertAllClose(loss_dict[deepmac_meta_arch.DEEP_MASK_ESTIMATION],
[[expected, expected]], rtol=1e-3)
def test_empty_masks(self):
boxes = tf.zeros([1, 0, 4])
masks = tf.zeros([1, 0, 128, 128])
classes = tf.zeros((1, 2, 5))
loss_dict = self.model._compute_deepmac_losses(
boxes, masks, masks, classes,
tf.zeros((1, 16, 16, 3)))
self.assertEqual(loss_dict[deepmac_meta_arch.DEEP_MASK_ESTIMATION].shape,
(1, 0))
def test_postprocess(self):
model = build_meta_arch()
model._mask_net = MockMaskNet()
boxes = np.zeros((2, 3, 4), dtype=np.float32)
boxes[:, :, [0, 2]] = 0.0
boxes[:, :, [1, 3]] = 8.0
boxes = tf.constant(boxes)
masks = model._postprocess_masks(
boxes, tf.zeros((2, 32, 32, 2)), tf.zeros((2, 32, 32, 2)))
prob = tf.nn.sigmoid(0.9).numpy()
self.assertAllClose(masks, prob * np.ones((2, 3, 16, 16)))
def test_postprocess_emb_proj(self):
model = build_meta_arch(network_type='embedding_projection',
use_instance_embedding=False,
use_xy=False, pixel_embedding_dim=8,
use_dice_loss=True,
dice_loss_prediction_probability=True)
boxes = np.zeros((2, 3, 4), dtype=np.float32)
boxes[:, :, [0, 2]] = 0.0
boxes[:, :, [1, 3]] = 8.0
boxes = tf.constant(boxes)
masks = model._postprocess_masks(
boxes, tf.zeros((2, 32, 32, 2)), tf.zeros((2, 32, 32, 2)))
self.assertEqual(masks.shape, (2, 3, 16, 16))
def test_postprocess_emb_proj_fullres(self):
model = build_meta_arch(network_type='embedding_projection',
predict_full_resolution_masks=True,
use_instance_embedding=False,
pixel_embedding_dim=8, use_xy=False,
use_dice_loss=True)
boxes = np.zeros((2, 3, 4), dtype=np.float32)
boxes = tf.constant(boxes)
masks = model._postprocess_masks(
boxes, tf.zeros((2, 32, 32, 2)), tf.zeros((2, 32, 32, 2)))
self.assertEqual(masks.shape, (2, 3, 128, 128))
def test_postprocess_no_crop_resize_shape(self):
model = build_meta_arch(predict_full_resolution_masks=True)
model._mask_net = MockMaskNet()
boxes = np.zeros((2, 3, 4), dtype=np.float32)
boxes[:, :, [0, 2]] = 0.0
boxes[:, :, [1, 3]] = 8.0
boxes = tf.constant(boxes)
masks = model._postprocess_masks(
boxes, tf.zeros((2, 32, 32, 2)), tf.zeros((2, 32, 32, 2)))
prob = tf.nn.sigmoid(0.9).numpy()
self.assertAllClose(masks, prob * np.ones((2, 3, 128, 128)))
def test_transform_boxes_to_feature_coordinates(self):
batch_size = 2
model = build_meta_arch()
model._mask_net = MockMaskNet()
boxes = np.zeros((batch_size, 3, 4), dtype=np.float32)
boxes[:, :, [0, 2]] = 0.1
boxes[:, :, [1, 3]] = 0.5
boxes = tf.constant(boxes)
true_image_shapes = tf.constant([
[64, 32, 3], # Image 1 is padded during resizing.
[64, 64, 3], # Image 2 is not padded.
])
resized_image_height = 64
resized_image_width = 64
resized_image_shape = [
batch_size, resized_image_height, resized_image_width, 3
]
feature_map_height = 32
feature_map_width = 32
instance_embedding = tf.zeros(
(batch_size, feature_map_height, feature_map_width, 2))
expected_boxes = np.array([
[ # Image 1
# 0.1 * (64 / resized_image_height) * feature_map_height -> 3.2
# 0.5 * (32 / resized_image_width) * feature_map_width -> 8.0
[3.2, 8., 3.2, 8.],
[3.2, 8., 3.2, 8.],
[3.2, 8., 3.2, 8.],
],
[ # Image 2
# 0.1 * (64 / resized_image_height) * feature_map_height -> 3.2
# 0.5 * (64 / resized_image_width) * feature_map_width -> 16
[3.2, 16., 3.2, 16.],
[3.2, 16., 3.2, 16.],
[3.2, 16., 3.2, 16.],
],
])
box_strided = model._transform_boxes_to_feature_coordinates(
boxes, true_image_shapes, resized_image_shape, instance_embedding)
self.assertAllClose(box_strided, expected_boxes)
def test_fc_tf_function(self):
net = deepmac_meta_arch.MaskHeadNetwork('fully_connected', 8, mask_size=32)
call_func = tf.function(net.__call__)
out = call_func(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 8)), training=True)
self.assertEqual(out.shape, (2, 32, 32))
def test_box_consistency_loss(self):
boxes_gt = tf.constant([[[0., 0., 0.49, 1.0]]])
boxes_jittered = tf.constant([[[0.0, 0.0, 1.0, 1.0]]])
mask_prediction = np.zeros((1, 1, 32, 32)).astype(np.float32)
mask_prediction[0, 0, :24, :24] = 1.0
loss = self.model._compute_box_consistency_loss(
boxes_gt, boxes_jittered, tf.constant(mask_prediction))
yloss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.constant([1.0] * 8 + [0.0] * 8),
logits=[1.0] * 12 + [0.0] * 4)
xloss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.constant([1.0] * 16),
logits=[1.0] * 12 + [0.0] * 4)
yloss_mean = tf.reduce_mean(yloss)
xloss_mean = tf.reduce_mean(xloss)
self.assertAllClose(loss[0], [yloss_mean + xloss_mean])
def test_box_consistency_loss_with_tightness(self):
boxes_gt = tf.constant([[[0., 0., 0.49, 0.49]]])
boxes_jittered = None
mask_prediction = np.zeros((1, 1, 8, 8)).astype(np.float32) - 1e10
mask_prediction[0, 0, :4, :4] = 1e10
model = build_meta_arch(box_consistency_tightness=True,
predict_full_resolution_masks=True)
loss = model._compute_box_consistency_loss(
boxes_gt, boxes_jittered, tf.constant(mask_prediction))
self.assertAllClose(loss[0], [0.0])
def test_box_consistency_loss_gt_count(self):
boxes_gt = tf.constant([[
[0., 0., 1.0, 1.0],
[0., 0., 0.49, 0.49]]])
boxes_jittered = None
mask_prediction = np.zeros((1, 2, 32, 32)).astype(np.float32)
mask_prediction[0, 0, :16, :16] = 1.0
mask_prediction[0, 1, :8, :8] = 1.0
model = build_meta_arch(
box_consistency_loss_normalize='normalize_groundtruth_count',
predict_full_resolution_masks=True)
loss_func = (
model._compute_box_consistency_loss)
loss = loss_func(
boxes_gt, boxes_jittered, tf.constant(mask_prediction))
yloss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.constant([1.0] * 32),
logits=[1.0] * 16 + [0.0] * 16) / 32.0
yloss_mean = tf.reduce_sum(yloss)
xloss = yloss
xloss_mean = tf.reduce_sum(xloss)
self.assertAllClose(loss[0, 0], yloss_mean + xloss_mean)
yloss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.constant([1.0] * 16 + [0.0] * 16),
logits=[1.0] * 8 + [0.0] * 24) / 16.0
yloss_mean = tf.reduce_sum(yloss)
xloss = yloss
xloss_mean = tf.reduce_sum(xloss)
self.assertAllClose(loss[0, 1], yloss_mean + xloss_mean)
def test_box_consistency_loss_balanced(self):
boxes_gt = tf.constant([[
[0., 0., 0.49, 0.49]]])
boxes_jittered = None
mask_prediction = np.zeros((1, 1, 32, 32)).astype(np.float32)
mask_prediction[0, 0] = 1.0
model = build_meta_arch(box_consistency_loss_normalize='normalize_balanced',
predict_full_resolution_masks=True)
loss_func = tf.function(
model._compute_box_consistency_loss)
loss = loss_func(
boxes_gt, boxes_jittered, tf.constant(mask_prediction))
yloss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=[0.] * 16 + [1.0] * 16,
logits=[1.0] * 32)
yloss_mean = tf.reduce_sum(yloss) / 16.0
xloss_mean = yloss_mean
self.assertAllClose(loss[0, 0], yloss_mean + xloss_mean)
def test_box_consistency_dice_loss(self):
model = build_meta_arch(use_dice_loss=True)
boxes_gt = tf.constant([[[0., 0., 0.49, 1.0]]])
boxes_jittered = tf.constant([[[0.0, 0.0, 1.0, 1.0]]])
almost_inf = 1e10
mask_prediction = np.full((1, 1, 32, 32), -almost_inf, dtype=np.float32)
mask_prediction[0, 0, :24, :24] = almost_inf
loss = model._compute_box_consistency_loss(
boxes_gt, boxes_jittered, tf.constant(mask_prediction))
yloss = 1 - 6.0 / 7
xloss = 0.2
self.assertAllClose(loss, [[yloss + xloss]])
def test_feature_consistency_loss_full_res_shape(self):
model = build_meta_arch(use_dice_loss=True,
predict_full_resolution_masks=True)
boxes = tf.zeros((5, 3, 4))
img = tf.zeros((5, 32, 32, 3))
mask_logits = tf.zeros((5, 3, 32, 32))
loss = model._compute_feature_consistency_loss(
boxes, img, mask_logits)
self.assertEqual([5, 3], loss.shape)
def test_feature_consistency_1_threshold(self):
model = build_meta_arch(predict_full_resolution_masks=True,
feature_consistency_threshold=0.99)
boxes = tf.zeros((5, 3, 4))
img = tf.zeros((5, 32, 32, 3))
mask_logits = tf.zeros((5, 3, 32, 32)) - 1e4
loss = model._compute_feature_consistency_loss(
boxes, img, mask_logits)
self.assertAllClose(loss, np.zeros((5, 3)))
def test_box_consistency_dice_loss_full_res(self):
model = build_meta_arch(use_dice_loss=True,
predict_full_resolution_masks=True)
boxes_gt = tf.constant([[[0., 0., 1.0, 1.0]]])
boxes_jittered = None
size = 32
almost_inf = 1e10
mask_prediction = np.full((1, 1, size, size), -almost_inf, dtype=np.float32)
mask_prediction[0, 0, :(size // 2), :] = almost_inf
loss = model._compute_box_consistency_loss(
boxes_gt, boxes_jittered, tf.constant(mask_prediction))
self.assertAlmostEqual(loss[0, 0].numpy(), 1 / 3)
def test_get_lab_image_shape(self):
output = self.model._get_lab_image(tf.zeros((2, 4, 4, 3)))
self.assertEqual(output.shape, (2, 4, 4, 3))
def test_self_supervised_augmented_loss_identity(self):
model = build_meta_arch(predict_full_resolution_masks=True,
augmented_self_supervision_max_translation=0.0)
x = tf.random.uniform((2, 3, 32, 32), 0, 1)
boxes = tf.constant([[0., 0., 1., 1.]] * 6)
boxes = tf.reshape(boxes, [2, 3, 4])
x = tf.cast(x > 0, tf.float32)
x = (x - 0.5) * 2e40  # x is a tensor of large +ve or -ve values.
loss = model._compute_self_supervised_augmented_loss(x, x, boxes)
self.assertAlmostEqual(loss.numpy().sum(), 0.0)
def test_self_supervised_mse_augmented_loss_0(self):
model = build_meta_arch(predict_full_resolution_masks=True,
augmented_self_supervision_max_translation=0.0,
augmented_self_supervision_loss='loss_mse')
x = tf.random.uniform((2, 3, 32, 32), 0, 1)
boxes = tf.constant([[0., 0., 1., 1.]] * 6)
boxes = tf.reshape(boxes, [2, 3, 4])
loss = model._compute_self_supervised_augmented_loss(x, x, boxes)
self.assertAlmostEqual(loss.numpy().min(), 0.0)
self.assertAlmostEqual(loss.numpy().max(), 0.0)
def test_self_supervised_mse_loss_scale_equivalent(self):
model = build_meta_arch(predict_full_resolution_masks=True,
augmented_self_supervision_max_translation=0.0,
augmented_self_supervision_loss='loss_mse')
x = np.zeros((1, 3, 32, 32), dtype=np.float32) + 100.0
y = 0.0 * x.copy()
x[0, 0, :8, :8] = 0.0
y[0, 0, :8, :8] = 1.0
x[0, 1, :16, :16] = 0.0
y[0, 1, :16, :16] = 1.0
x[0, 2, :16, :16] = 0.0
x[0, 2, :8, :8] = 1.0
y[0, 2, :16, :16] = 0.0
boxes = np.array([[0., 0., 0.22, 0.22], [0., 0., 0.47, 0.47],
[0., 0., 0.47, 0.47]],
dtype=np.float32)
boxes = tf.reshape(tf.constant(boxes), [1, 3, 4])
loss = model._compute_self_supervised_augmented_loss(x, y, boxes)
self.assertEqual(loss.shape, (1, 3))
mse_1_minus_0 = (tf.nn.sigmoid(1.0) - tf.nn.sigmoid(0.0)).numpy()**2
self.assertAlmostEqual(loss.numpy()[0, 0], mse_1_minus_0)
self.assertAlmostEqual(loss.numpy()[0, 1], mse_1_minus_0)
self.assertAlmostEqual(loss.numpy()[0, 2], mse_1_minus_0 / 4.0)
def test_self_supervised_kldiv_augmented_loss_0(self):
model = build_meta_arch(predict_full_resolution_masks=True,
augmented_self_supervision_max_translation=0.0,
augmented_self_supervision_loss='loss_kl_div')
x = tf.random.uniform((2, 3, 32, 32), 0, 1)
boxes = tf.constant([[0., 0., 1., 1.]] * 6)
boxes = tf.reshape(boxes, [2, 3, 4])
loss = model._compute_self_supervised_augmented_loss(x, x, boxes)
self.assertAlmostEqual(loss.numpy().min(), 0.0)
self.assertAlmostEqual(loss.numpy().max(), 0.0)
def test_self_supervised_kldiv_scale_equivalent(self):
model = build_meta_arch(predict_full_resolution_masks=True,
augmented_self_supervision_max_translation=0.0,
augmented_self_supervision_loss='loss_kl_div')
pred = np.zeros((1, 2, 32, 32), dtype=np.float32) + 100.0
true = 0.0 * pred.copy()
pred[0, 0, :8, :8] = LOGIT_HALF
true[0, 0, :8, :8] = LOGIT_QUARTER
pred[0, 1, :16, :16] = LOGIT_HALF
true[0, 1, :16, :16] = LOGIT_QUARTER
boxes = np.array([[0., 0., 0.22, 0.22], [0., 0., 0.47, 0.47]],
dtype=np.float32)
boxes = tf.reshape(tf.constant(boxes), [1, 2, 4])
loss = model._compute_self_supervised_augmented_loss(
original_logits=pred, deaugmented_logits=true, boxes=boxes)
self.assertEqual(loss.shape, (1, 2))
expected = (3 * math.log(3) - 4 * math.log(2)) / 4.0
self.assertAlmostEqual(loss.numpy()[0, 0], expected, places=4)
self.assertAlmostEqual(loss.numpy()[0, 1], expected, places=4)
def test_self_supervision_warmup(self):
tf.keras.backend.set_learning_phase(True)
model = build_meta_arch(
use_dice_loss=True,
predict_full_resolution_masks=True,
network_type='cond_inst1',
dim=9,
pixel_embedding_dim=8,
use_instance_embedding=False,
use_xy=False,
augmented_self_supervision_loss_weight=1.0,
augmented_self_supervision_max_translation=0.5,
augmented_self_supervision_warmup_start=10,
augmented_self_supervision_warmup_steps=40)
num_stages = 1
prediction = {
'preprocessed_inputs': tf.random.normal((1, 32, 32, 3)),
'MASK_LOGITS_GT_BOXES': [tf.random.normal((1, 5, 8, 8))] * num_stages,
'SELF_SUPERVISED_DEAUGMENTED_MASK_LOGITS':
[tf.random.normal((1, 5, 8, 8))] * num_stages,
'object_center': [tf.random.normal((1, 8, 8, 6))] * num_stages,
'box/offset': [tf.random.normal((1, 8, 8, 2))] * num_stages,
'box/scale': [tf.random.normal((1, 8, 8, 2))] * num_stages,
'extracted_features': [tf.random.normal((3, 32, 32, 7))] * num_stages
}
boxes = [tf.convert_to_tensor([[0., 0., 1., 1.]] * 5)]
classes = [tf.one_hot([1, 0, 1, 1, 1], depth=6)]
weights = [tf.ones(5)]
masks = [tf.ones((5, 32, 32))]
model.provide_groundtruth(
groundtruth_boxes_list=boxes,
groundtruth_classes_list=classes,
groundtruth_weights_list=weights,
groundtruth_masks_list=masks,
training_step=5)
loss_at_5 = model.loss(prediction, tf.constant([[32, 32, 3.0]]))
model.provide_groundtruth(
groundtruth_boxes_list=boxes,
groundtruth_classes_list=classes,
groundtruth_weights_list=weights,
groundtruth_masks_list=masks,
training_step=20)
loss_at_20 = model.loss(prediction, tf.constant([[32, 32, 3.0]]))
model.provide_groundtruth(
groundtruth_boxes_list=boxes,
groundtruth_classes_list=classes,
groundtruth_weights_list=weights,
groundtruth_masks_list=masks,
training_step=50)
loss_at_50 = model.loss(prediction, tf.constant([[32, 32, 3.0]]))
model.provide_groundtruth(
groundtruth_boxes_list=boxes,
groundtruth_classes_list=classes,
groundtruth_weights_list=weights,
groundtruth_masks_list=masks,
training_step=100)
loss_at_100 = model.loss(prediction, tf.constant([[32, 32, 3.0]]))
loss_key = 'Loss/' + deepmac_meta_arch.DEEP_MASK_AUGMENTED_SELF_SUPERVISION
self.assertAlmostEqual(loss_at_5[loss_key].numpy(), 0.0)
self.assertGreater(loss_at_20[loss_key], 0.0)
self.assertAlmostEqual(loss_at_20[loss_key].numpy(),
loss_at_50[loss_key].numpy() / 4.0)
self.assertAlmostEqual(loss_at_50[loss_key].numpy(),
loss_at_100[loss_key].numpy())
def test_loss_keys(self):
model = build_meta_arch(
use_dice_loss=True,
augmented_self_supervision_loss_weight=1.0,
augmented_self_supervision_max_translation=0.5,
predict_full_resolution_masks=True)
prediction = {
'preprocessed_inputs': tf.random.normal((3, 32, 32, 3)),
'MASK_LOGITS_GT_BOXES': [tf.random.normal((3, 5, 8, 8))] * 2,
'object_center': [tf.random.normal((3, 8, 8, 6))] * 2,
'box/offset': [tf.random.normal((3, 8, 8, 2))] * 2,
'box/scale': [tf.random.normal((3, 8, 8, 2))] * 2,
'SELF_SUPERVISED_DEAUGMENTED_MASK_LOGITS': (
[tf.random.normal((3, 5, 8, 8))] * 2),
'extracted_features': [tf.random.normal((3, 32, 32, 7))] * 2
}
model.provide_groundtruth(
groundtruth_boxes_list=[
tf.convert_to_tensor([[0., 0., 1., 1.]] * 5)] * 3,
groundtruth_classes_list=[tf.one_hot([1, 0, 1, 1, 1], depth=6)] * 3,
groundtruth_weights_list=[tf.ones(5)] * 3,
groundtruth_masks_list=[tf.ones((5, 32, 32))] * 3,
groundtruth_keypoints_list=[tf.zeros((5, 10, 2))] * 3,
groundtruth_keypoint_depths_list=[tf.zeros((5, 10))] * 3)
loss = model.loss(prediction, tf.constant([[32, 32, 3.0]]))
self.assertGreater(loss['Loss/deep_mask_estimation'], 0.0)
for weak_loss in deepmac_meta_arch.MASK_LOSSES:
if weak_loss == deepmac_meta_arch.DEEP_MASK_FEATURE_CONSISTENCY:
continue
self.assertGreater(loss['Loss/' + weak_loss], 0.0,
'{} was <= 0'.format(weak_loss))
def test_eval_loss_and_postprocess_keys(self):
model = build_meta_arch(
use_dice_loss=True,
augmented_self_supervision_loss_weight=1.0,
augmented_self_supervision_max_translation=0.5,
predict_full_resolution_masks=True)
true_image_shapes = tf.constant([[32, 32, 3]], dtype=tf.int32)
prediction_dict = model.predict(
tf.zeros((1, 32, 32, 3)), true_image_shapes)
output = model.postprocess(prediction_dict, true_image_shapes)
self.assertEqual(output['detection_boxes'].shape, (1, 5, 4))
self.assertEqual(output['detection_masks'].shape, (1, 5, 128, 128))
model.provide_groundtruth(
groundtruth_boxes_list=[
tf.convert_to_tensor([[0., 0., 1., 1.]] * 5)] * 1,
groundtruth_classes_list=[tf.one_hot([1, 0, 1, 1, 1], depth=6)] * 1,
groundtruth_weights_list=[tf.ones(5)] * 1,
groundtruth_masks_list=[tf.ones((5, 32, 32))] * 1,
groundtruth_keypoints_list=[tf.zeros((5, 10, 2))] * 1,
groundtruth_keypoint_depths_list=[tf.zeros((5, 10))] * 1)
prediction_dict = model.predict(
tf.zeros((1, 32, 32, 3)), true_image_shapes)
model.loss(prediction_dict, true_image_shapes)
def test_loss_weight_response(self):
tf.random.set_seed(12)
model = build_meta_arch(
use_dice_loss=True,
predict_full_resolution_masks=True,
network_type='cond_inst1',
dim=9,
pixel_embedding_dim=8,
use_instance_embedding=False,
use_xy=False,
augmented_self_supervision_loss_weight=1.0,
augmented_self_supervision_max_translation=0.5,
)
num_stages = 1
prediction = {
'preprocessed_inputs': tf.random.normal((1, 32, 32, 3)),
'MASK_LOGITS_GT_BOXES': [tf.random.normal((1, 5, 8, 8))] * num_stages,
'object_center': [tf.random.normal((1, 8, 8, 6))] * num_stages,
'box/offset': [tf.random.normal((1, 8, 8, 2))] * num_stages,
'box/scale': [tf.random.normal((1, 8, 8, 2))] * num_stages,
'SELF_SUPERVISED_DEAUGMENTED_MASK_LOGITS': (
[tf.random.normal((1, 5, 8, 8))] * num_stages),
'extracted_features': [tf.random.normal((3, 32, 32, 7))] * num_stages
}
boxes = [tf.convert_to_tensor([[0., 0., 1., 1.]] * 5)]
classes = [tf.one_hot([1, 0, 1, 1, 1], depth=6)]
weights = [tf.ones(5)]
masks = [tf.ones((5, 32, 32))]
keypoints = [tf.zeros((5, 10, 2))]
keypoint_depths = [tf.ones((5, 10))]
model.provide_groundtruth(
groundtruth_boxes_list=boxes,
groundtruth_classes_list=classes,
groundtruth_weights_list=weights,
groundtruth_masks_list=masks,
groundtruth_keypoints_list=keypoints,
groundtruth_keypoint_depths_list=keypoint_depths)
loss = model.loss(prediction, tf.constant([[32, 32, 3.0]]))
self.assertGreater(loss['Loss/deep_mask_estimation'], 0.0)
for mask_loss in deepmac_meta_arch.MASK_LOSSES:
self.assertGreater(loss['Loss/' + mask_loss], 0.0,
'{} was <= 0'.format(mask_loss))
rng = random.Random(0)
loss_weights = {
deepmac_meta_arch.DEEP_MASK_ESTIMATION: rng.uniform(1, 5),
deepmac_meta_arch.DEEP_MASK_BOX_CONSISTENCY: rng.uniform(1, 5),
deepmac_meta_arch.DEEP_MASK_FEATURE_CONSISTENCY: rng.uniform(1, 5),
deepmac_meta_arch.DEEP_MASK_AUGMENTED_SELF_SUPERVISION: (
rng.uniform(1, 5)),
deepmac_meta_arch.DEEP_MASK_POINTLY_SUPERVISED: rng.uniform(1, 5)
}
weighted_model = build_meta_arch(
use_dice_loss=True,
predict_full_resolution_masks=True,
network_type='cond_inst1',
dim=9,
pixel_embedding_dim=8,
use_instance_embedding=False,
use_xy=False,
task_loss_weight=loss_weights[deepmac_meta_arch.DEEP_MASK_ESTIMATION],
box_consistency_loss_weight=(
loss_weights[deepmac_meta_arch.DEEP_MASK_BOX_CONSISTENCY]),
feature_consistency_loss_weight=(
loss_weights[deepmac_meta_arch.DEEP_MASK_FEATURE_CONSISTENCY]),
augmented_self_supervision_loss_weight=(
loss_weights[deepmac_meta_arch.DEEP_MASK_AUGMENTED_SELF_SUPERVISION]
),
pointly_supervised_keypoint_loss_weight=(
loss_weights[deepmac_meta_arch.DEEP_MASK_POINTLY_SUPERVISED])
)
weighted_model.provide_groundtruth(
groundtruth_boxes_list=boxes,
groundtruth_classes_list=classes,
groundtruth_weights_list=weights,
groundtruth_masks_list=masks,
groundtruth_keypoints_list=keypoints,
groundtruth_keypoint_depths_list=keypoint_depths)
weighted_loss = weighted_model.loss(prediction, tf.constant([[32, 32, 3]]))
for mask_loss in deepmac_meta_arch.MASK_LOSSES:
loss_key = 'Loss/' + mask_loss
self.assertAllEqual(
weighted_loss[loss_key], loss[loss_key] * loss_weights[mask_loss],
f'{mask_loss} did not respond to change in weight.')
@parameterized.parameters(
[dict(feature_consistency_type='consistency_default_lab',
feature_consistency_comparison='comparison_default_gaussian'),
dict(feature_consistency_type='consistency_feature_map',
feature_consistency_comparison='comparison_normalized_dotprod')],
)
def test_feature_consistency_warmup(
self, feature_consistency_type, feature_consistency_comparison):
tf.keras.backend.set_learning_phase(True)
model = build_meta_arch(
use_dice_loss=True,
predict_full_resolution_masks=True,
network_type='cond_inst1',
dim=9,
pixel_embedding_dim=8,
use_instance_embedding=False,
use_xy=False,
feature_consistency_warmup_steps=10,
feature_consistency_warmup_start=10,
feature_consistency_type=feature_consistency_type,
feature_consistency_comparison=feature_consistency_comparison)
num_stages = 1
prediction = {
'preprocessed_inputs': tf.random.normal((1, 32, 32, 3)),
'MASK_LOGITS_GT_BOXES': [tf.random.normal((1, 5, 8, 8))] * num_stages,
'object_center': [tf.random.normal((1, 8, 8, 6))] * num_stages,
'box/offset': [tf.random.normal((1, 8, 8, 2))] * num_stages,
'box/scale': [tf.random.normal((1, 8, 8, 2))] * num_stages,
'extracted_features': [tf.random.normal((3, 32, 32, 7))] * num_stages
}
boxes = [tf.convert_to_tensor([[0., 0., 1., 1.]] * 5)]
classes = [tf.one_hot([1, 0, 1, 1, 1], depth=6)]
weights = [tf.ones(5)]
masks = [tf.ones((5, 32, 32))]
model.provide_groundtruth(
groundtruth_boxes_list=boxes,
groundtruth_classes_list=classes,
groundtruth_weights_list=weights,
groundtruth_masks_list=masks,
training_step=5)
loss_at_5 = model.loss(prediction, tf.constant([[32, 32, 3.0]]))
model.provide_groundtruth(
groundtruth_boxes_list=boxes,
groundtruth_classes_list=classes,
groundtruth_weights_list=weights,
groundtruth_masks_list=masks,
training_step=15)
loss_at_15 = model.loss(prediction, tf.constant([[32, 32, 3.0]]))
model.provide_groundtruth(
groundtruth_boxes_list=boxes,
groundtruth_classes_list=classes,
groundtruth_weights_list=weights,
groundtruth_masks_list=masks,
training_step=20)
loss_at_20 = model.loss(prediction, tf.constant([[32, 32, 3.0]]))
model.provide_groundtruth(
groundtruth_boxes_list=boxes,
groundtruth_classes_list=classes,
groundtruth_weights_list=weights,
groundtruth_masks_list=masks,
training_step=100)
loss_at_100 = model.loss(prediction, tf.constant([[32, 32, 3.0]]))
loss_key = 'Loss/' + deepmac_meta_arch.DEEP_MASK_FEATURE_CONSISTENCY
self.assertAlmostEqual(loss_at_5[loss_key].numpy(), 0.0)
self.assertGreater(loss_at_15[loss_key], 0.0)
self.assertAlmostEqual(loss_at_15[loss_key].numpy(),
loss_at_20[loss_key].numpy() / 2.0)
self.assertAlmostEqual(loss_at_20[loss_key].numpy(),
loss_at_100[loss_key].numpy())
def test_pointly_supervised_loss(self):
tf.keras.backend.set_learning_phase(True)
model = build_meta_arch(
use_dice_loss=False,
predict_full_resolution_masks=True,
network_type='cond_inst1',
dim=9,
pixel_embedding_dim=8,
use_instance_embedding=False,
use_xy=False,
pointly_supervised_keypoint_loss_weight=1.0)
mask_logits = np.zeros((1, 1, 32, 32), dtype=np.float32)
keypoints = np.zeros((1, 1, 1, 2), dtype=np.float32)
keypoint_depths = np.zeros((1, 1, 1), dtype=np.float32)
keypoints[..., 0] = 0.5
keypoints[..., 1] = 0.5
keypoint_depths[..., 0] = 1.0
mask_logits[:, :, 16, 16] = 1.0
expected_loss = tf.nn.sigmoid_cross_entropy_with_logits(
logits=[[1.0]], labels=[[1.0]]
).numpy()
loss = model._compute_pointly_supervised_loss_from_keypoints(
mask_logits, keypoints, keypoint_depths)
self.assertEqual(loss.shape, (1, 1))
self.assertAllClose(expected_loss, loss)
def test_ignore_per_class_box_overlap(self):
tf.keras.backend.set_learning_phase(True)
model = build_meta_arch(
use_dice_loss=False,
predict_full_resolution_masks=True,
network_type='cond_inst1',
dim=9,
pixel_embedding_dim=8,
use_instance_embedding=False,
use_xy=False,
pointly_supervised_keypoint_loss_weight=1.0,
ignore_per_class_box_overlap=True)
self.assertTrue(model._deepmac_params.ignore_per_class_box_overlap)
mask_logits = tf.zeros((2, 3, 16, 16))
mask_gt = tf.zeros((2, 3, 32, 32))
boxes = tf.zeros((2, 3, 4))
classes = tf.zeros((2, 3, 5))
loss = model._compute_mask_prediction_loss(
boxes, mask_logits, mask_gt, classes)
self.assertEqual(loss.shape, (2, 3))
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FullyConnectedMaskHeadTest(tf.test.TestCase):
def test_fc_mask_head(self):
head = deepmac_meta_arch.FullyConnectedMaskHead(512, 16)
inputs = tf.random.uniform([100, 16, 16, 512])
output = head(inputs)
self.assertAllEqual([100, 16, 16, 1], output.numpy().shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ResNetMaskHeadTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(['resnet4', 'resnet8', 'resnet20'])
def test_forward(self, name):
net = deepmac_meta_arch.ResNetMaskNetwork(name, 8)
out = net(tf.zeros((3, 32, 32, 16)))
self.assertEqual(out.shape[:3], (3, 32, 32))
if __name__ == '__main__':
tf.test.main()
| 66,519 | 36.391793 | 108 | py |
models | models-master/research/object_detection/meta_architectures/deepmac_meta_arch.py | """Deep Mask heads above CenterNet (DeepMAC)[1] architecture.
[1]: https://arxiv.org/abs/2104.00613
"""
import collections
from absl import logging
import numpy as np
import tensorflow as tf
from object_detection.builders import losses_builder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import losses
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.models.keras_models import hourglass_network
from object_detection.models.keras_models import resnet_v1
from object_detection.protos import center_net_pb2
from object_detection.protos import losses_pb2
from object_detection.utils import shape_utils
from object_detection.utils import spatial_transform_ops
from object_detection.utils import tf_version
if tf_version.is_tf2():
import tensorflow_io as tfio # pylint:disable=g-import-not-at-top
INSTANCE_EMBEDDING = 'INSTANCE_EMBEDDING'
PIXEL_EMBEDDING = 'PIXEL_EMBEDDING'
MASK_LOGITS_GT_BOXES = 'MASK_LOGITS_GT_BOXES'
DEEP_MASK_ESTIMATION = 'deep_mask_estimation'
DEEP_MASK_BOX_CONSISTENCY = 'deep_mask_box_consistency'
DEEP_MASK_FEATURE_CONSISTENCY = 'deep_mask_feature_consistency'
DEEP_MASK_POINTLY_SUPERVISED = 'deep_mask_pointly_supervised'
SELF_SUPERVISED_DEAUGMENTED_MASK_LOGITS = (
'SELF_SUPERVISED_DEAUGMENTED_MASK_LOGITS')
DEEP_MASK_AUGMENTED_SELF_SUPERVISION = 'deep_mask_augmented_self_supervision'
CONSISTENCY_FEATURE_MAP = 'CONSISTENCY_FEATURE_MAP'
LOSS_KEY_PREFIX = center_net_meta_arch.LOSS_KEY_PREFIX
NEIGHBORS_2D = [[-1, -1], [-1, 0], [-1, 1],
[0, -1], [0, 1],
[1, -1], [1, 0], [1, 1]]
WEAK_LOSSES = [DEEP_MASK_BOX_CONSISTENCY, DEEP_MASK_FEATURE_CONSISTENCY,
DEEP_MASK_AUGMENTED_SELF_SUPERVISION,
DEEP_MASK_POINTLY_SUPERVISED]
MASK_LOSSES = WEAK_LOSSES + [DEEP_MASK_ESTIMATION]
DeepMACParams = collections.namedtuple('DeepMACParams', [
'classification_loss', 'dim', 'task_loss_weight', 'pixel_embedding_dim',
'allowed_masked_classes_ids', 'mask_size', 'mask_num_subsamples',
'use_xy', 'network_type', 'use_instance_embedding', 'num_init_channels',
'predict_full_resolution_masks', 'postprocess_crop_size',
'max_roi_jitter_ratio', 'roi_jitter_mode',
'box_consistency_loss_weight', 'feature_consistency_threshold',
'feature_consistency_dilation', 'feature_consistency_loss_weight',
'box_consistency_loss_normalize', 'box_consistency_tightness',
'feature_consistency_warmup_steps', 'feature_consistency_warmup_start',
'use_only_last_stage', 'augmented_self_supervision_max_translation',
'augmented_self_supervision_loss_weight',
'augmented_self_supervision_flip_probability',
'augmented_self_supervision_warmup_start',
'augmented_self_supervision_warmup_steps',
'augmented_self_supervision_loss',
'augmented_self_supervision_scale_min',
'augmented_self_supervision_scale_max',
'pointly_supervised_keypoint_loss_weight',
'ignore_per_class_box_overlap',
'feature_consistency_type',
'feature_consistency_comparison'
])
def _get_loss_weight(loss_name, config):
"""Utility function to get loss weights by name."""
if loss_name == DEEP_MASK_ESTIMATION:
return config.task_loss_weight
elif loss_name == DEEP_MASK_FEATURE_CONSISTENCY:
return config.feature_consistency_loss_weight
elif loss_name == DEEP_MASK_BOX_CONSISTENCY:
return config.box_consistency_loss_weight
elif loss_name == DEEP_MASK_AUGMENTED_SELF_SUPERVISION:
return config.augmented_self_supervision_loss_weight
elif loss_name == DEEP_MASK_POINTLY_SUPERVISED:
return config.pointly_supervised_keypoint_loss_weight
else:
raise ValueError('Unknown loss - {}'.format(loss_name))
def subsample_instances(classes, weights, boxes, masks, num_subsamples):
"""Randomly subsamples instances to the desired number.
Args:
classes: [num_instances, num_classes] float tensor of one-hot encoded
classes.
weights: [num_instances] float tensor of weights of each instance.
boxes: [num_instances, 4] tensor of box coordinates.
masks: [num_instances, height, width] tensor of per-instance masks.
num_subsamples: int, the desired number of samples.
Returns:
classes: [num_subsamples, num_classes] float tensor of classes.
weights: [num_subsamples] float tensor of weights.
boxes: [num_subsamples, 4] float tensor of box coordinates.
masks: [num_subsamples, height, width] float tensor of per-instance masks.
"""
if num_subsamples <= -1:
return classes, weights, boxes, masks
num_instances = tf.reduce_sum(tf.cast(weights > 0.5, tf.int32))
if num_instances <= num_subsamples:
return (classes[:num_subsamples], weights[:num_subsamples],
boxes[:num_subsamples], masks[:num_subsamples])
else:
random_index = tf.random.uniform([num_subsamples], 0, num_instances,
dtype=tf.int32)
return (tf.gather(classes, random_index), tf.gather(weights, random_index),
tf.gather(boxes, random_index), tf.gather(masks, random_index))
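# Illustrative usage sketch (assumed shapes, not exercised by the library
# itself): with classes of shape [10, 6], weights [10], boxes [10, 4] and
# masks [10, 32, 32],
#   subsample_instances(classes, weights, boxes, masks, num_subsamples=4)
# returns tensors whose leading dimension is 4, while num_subsamples=-1
# returns the inputs unchanged.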
def _get_deepmac_network_by_type(name, num_init_channels, mask_size=None):
"""Get DeepMAC network model given a string type."""
if name.startswith('hourglass'):
if name == 'hourglass10':
return hourglass_network.hourglass_10(num_init_channels,
initial_downsample=False)
elif name == 'hourglass20':
return hourglass_network.hourglass_20(num_init_channels,
initial_downsample=False)
elif name == 'hourglass32':
return hourglass_network.hourglass_32(num_init_channels,
initial_downsample=False)
elif name == 'hourglass52':
return hourglass_network.hourglass_52(num_init_channels,
initial_downsample=False)
elif name == 'hourglass100':
return hourglass_network.hourglass_100(num_init_channels,
initial_downsample=False)
elif name == 'hourglass20_uniform_size':
return hourglass_network.hourglass_20_uniform_size(num_init_channels)
elif name == 'hourglass20_no_shortcut':
return hourglass_network.hourglass_20_no_shortcut(num_init_channels)
elif name == 'fully_connected':
if not mask_size:
raise ValueError('Mask size must be set.')
return FullyConnectedMaskHead(num_init_channels, mask_size)
elif _is_mask_head_param_free(name):
return tf.keras.layers.Lambda(lambda x: x)
elif name.startswith('resnet'):
return ResNetMaskNetwork(name, num_init_channels)
raise ValueError('Unknown network type {}'.format(name))
def boxes_batch_normalized_to_absolute_coordinates(boxes, height, width):
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=2)
height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
ymin *= height
ymax *= height
xmin *= width
xmax *= width
return tf.stack([ymin, xmin, ymax, xmax], axis=2)
def boxes_batch_absolute_to_normalized_coordinates(boxes, height, width):
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=2)
height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
ymin /= height
ymax /= height
xmin /= width
xmax /= width
return tf.stack([ymin, xmin, ymax, xmax], axis=2)
def _resize_instance_masks_non_empty(masks, shape):
"""Resize a non-empty tensor of masks to the given shape."""
height, width = shape
flattened_masks, batch_size, num_instances = flatten_first2_dims(masks)
flattened_masks = flattened_masks[:, :, :, tf.newaxis]
flattened_masks = tf.image.resize(
flattened_masks, (height, width),
method=tf.image.ResizeMethod.BILINEAR)
return unpack_first2_dims(
flattened_masks[:, :, :, 0], batch_size, num_instances)
def resize_instance_masks(masks, shape):
batch_size, num_instances = tf.shape(masks)[0], tf.shape(masks)[1]
return tf.cond(
tf.shape(masks)[1] == 0,
lambda: tf.zeros((batch_size, num_instances, shape[0], shape[1])),
lambda: _resize_instance_masks_non_empty(masks, shape))
def filter_masked_classes(masked_class_ids, classes, weights, masks):
"""Filter out masks whose class IDs are not present in masked_class_ids.
Args:
masked_class_ids: A list of class IDs allowed to have masks. These class IDs
are 1-indexed.
classes: A [batch_size, num_instances, num_classes] float tensor containing
the one-hot encoded classes.
weights: A [batch_size, num_instances] float tensor containing the weights
of each sample.
masks: A [batch_size, num_instances, height, width] tensor containing the
mask per instance.
Returns:
classes_filtered: A [batch_size, num_instances, num_classes] float tensor
containing the one-hot encoded classes with classes not in
masked_class_ids zeroed out.
weights_filtered: A [batch_size, num_instances] float tensor containing the
weights of each sample with instances whose classes aren't in
masked_class_ids zeroed out.
masks_filtered: A [batch_size, num_instances, height, width] tensor
containing the mask per instance with masks not belonging to
masked_class_ids zeroed out.
"""
if len(masked_class_ids) == 0: # pylint:disable=g-explicit-length-test
return classes, weights, masks
if tf.shape(classes)[1] == 0:
return classes, weights, masks
masked_class_ids = tf.constant(np.array(masked_class_ids, dtype=np.int32))
label_id_offset = 1
masked_class_ids -= label_id_offset
class_ids = tf.argmax(classes, axis=2, output_type=tf.int32)
matched_classes = tf.equal(
class_ids[:, :, tf.newaxis], masked_class_ids[tf.newaxis, tf.newaxis, :]
)
matched_classes = tf.reduce_any(matched_classes, axis=2)
matched_classes = tf.cast(matched_classes, tf.float32)
return (
classes * matched_classes[:, :, tf.newaxis],
weights * matched_classes,
masks * matched_classes[:, :, tf.newaxis, tf.newaxis]
)
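# Illustrative usage sketch (assumed inputs): masked_class_ids is 1-indexed,
# so with
#   classes = tf.one_hot([[0, 2]], depth=3)  # shape [1, 2, 3]
#   weights = tf.ones((1, 2))
#   masks = tf.ones((1, 2, 8, 8))
#   filter_masked_classes([1], classes, weights, masks)
# only the first instance (class index 0, i.e. class ID 1) keeps its class,
# weight and mask; the second instance is zeroed out.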
def per_instance_no_class_overlap(classes, boxes, height, width):
"""Returns 1s inside boxes but overlapping boxes of same class are zeroed out.
Args:
classes: A [batch_size, num_instances, num_classes] float tensor containing
the one-hot encoded classes.
boxes: A [batch_size, num_instances, 4] shaped float tensor of normalized
boxes.
height: int, height of the desired mask.
width: int, width of the desired mask.
Returns:
mask: A [batch_size, num_instances, height, width] float tensor of 0s and
1s.
"""
box_mask = fill_boxes(boxes, height, width)
per_class_box_mask = (
box_mask[:, :, tf.newaxis, :, :] *
classes[:, :, :, tf.newaxis, tf.newaxis])
per_class_instance_count = tf.reduce_sum(per_class_box_mask, axis=1)
per_class_valid_map = per_class_instance_count < 2
class_indices = tf.argmax(classes, axis=2)
per_instance_valid_map = tf.gather(
per_class_valid_map, class_indices, batch_dims=1)
return tf.cast(per_instance_valid_map, tf.float32)
def flatten_first2_dims(tensor):
"""Flatten first 2 dimensions of a tensor.
Args:
tensor: A tensor with shape [M, N, ....]
Returns:
flattened_tensor: A tensor of shape [M * N, ...]
M: int, the length of the first dimension of the input.
N: int, the length of the second dimension of the input.
"""
shape = tf.shape(tensor)
d1, d2, rest = shape[0], shape[1], shape[2:]
tensor = tf.reshape(
tensor, tf.concat([[d1 * d2], rest], axis=0))
return tensor, d1, d2
def unpack_first2_dims(tensor, dim1, dim2):
"""Unpack the flattened first dimension of the tensor into 2 dimensions.
Args:
tensor: A tensor of shape [dim1 * dim2, ...]
dim1: int, the size of the first dimension.
dim2: int, the size of the second dimension.
Returns:
unflattened_tensor: A tensor of shape [dim1, dim2, ...].
"""
shape = tf.shape(tensor)
result_shape = tf.concat([[dim1, dim2], shape[1:]], axis=0)
return tf.reshape(tensor, result_shape)
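# Illustrative round-trip sketch: flatten_first2_dims and unpack_first2_dims
# are inverses of each other.
#   x = tf.zeros((2, 3, 16, 16))
#   flat, d1, d2 = flatten_first2_dims(x)        # flat.shape == (6, 16, 16)
#   restored = unpack_first2_dims(flat, d1, d2)  # shape == (2, 3, 16, 16)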
def crop_and_resize_instance_masks(masks, boxes, mask_size):
"""Crop and resize each mask according to the given boxes.
Args:
masks: A [B, N, H, W] float tensor.
boxes: A [B, N, 4] float tensor of normalized boxes.
mask_size: int, the size of the output masks.
Returns:
masks: A [B, N, mask_size, mask_size] float tensor of cropped and resized
instance masks.
"""
masks, batch_size, num_instances = flatten_first2_dims(masks)
boxes, _, _ = flatten_first2_dims(boxes)
cropped_masks = spatial_transform_ops.matmul_crop_and_resize(
masks[:, :, :, tf.newaxis], boxes[:, tf.newaxis, :],
[mask_size, mask_size])
cropped_masks = tf.squeeze(cropped_masks, axis=[1, 4])
return unpack_first2_dims(cropped_masks, batch_size, num_instances)
def fill_boxes(boxes, height, width, expand=0):
"""Fills the area included in the boxes with 1s.
Args:
boxes: A [batch_size, num_instances, 4] shaped float tensor of boxes given
in the normalized coordinate space.
height: int, height of the output image.
width: int, width of the output image.
expand: int, the number of pixels to expand the box by.
Returns:
filled_boxes: A [batch_size, num_instances, height, width] shaped float
tensor with 1s in the area that falls inside each box.
"""
expand = float(expand)
boxes_abs = boxes_batch_normalized_to_absolute_coordinates(
boxes, height, width)
ymin, xmin, ymax, xmax = tf.unstack(
boxes_abs[:, :, tf.newaxis, tf.newaxis, :], 4, axis=4)
ygrid, xgrid = tf.meshgrid(tf.range(height), tf.range(width), indexing='ij')
ygrid, xgrid = tf.cast(ygrid, tf.float32), tf.cast(xgrid, tf.float32)
ygrid, xgrid = (ygrid[tf.newaxis, tf.newaxis, :, :],
xgrid[tf.newaxis, tf.newaxis, :, :])
ymin -= expand
xmin -= expand
ymax += expand
xmax += expand
filled_boxes = tf.logical_and(
tf.logical_and(ygrid >= ymin, ygrid <= ymax),
tf.logical_and(xgrid >= xmin, xgrid <= xmax))
return tf.cast(filled_boxes, tf.float32)
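# Illustrative worked example (assumed inputs): for a single normalized box
# covering the top-left quadrant,
#   fill_boxes(tf.constant([[[0., 0., 0.5, 0.5]]]), height=4, width=4)
# yields a [1, 1, 4, 4] tensor whose top-left 3x3 block is 1 and whose
# remaining entries are 0, since both box edges are inclusive after the
# conversion to absolute coordinates.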
def embedding_projection(x, y):
"""Compute dot product between two given embeddings.
Args:
x: [num_instances, height, width, dimension] float tensor input.
y: [num_instances, height, width, dimension] or
[num_instances, 1, 1, dimension] float tensor input. When the height
and width dimensions are 1, TF will broadcast it.
Returns:
dist: A [num_instances, height, width, 1] float tensor containing the
per-pixel embedding projection (dot product).
"""
dot = tf.reduce_sum(x * y, axis=3, keepdims=True)
return dot
def _get_2d_neighbors_kernel():
"""Returns a conv. kernel that when applies generates 2D neighbors.
Returns:
kernel: A float tensor of shape [3, 3, 1, 8]
"""
kernel = np.zeros((3, 3, 1, 8))
for i, (y, x) in enumerate(NEIGHBORS_2D):
kernel[1 + y, 1 + x, 0, i] = 1.0
return tf.constant(kernel, dtype=tf.float32)
def generate_2d_neighbors(input_tensor, dilation=2):
"""Generate a feature map of 2D neighbors.
Note: This op makes 8 (the number of neighbors) the leading dimension so
that subsequent ops on TPU won't have to pad the last dimension to 128.
Args:
input_tensor: A float tensor of shape [batch_size, height, width, channels].
dilation: int, the dilation factor for considering neighbors.
Returns:
output: A float tensor of all 8 2-D neighbors, of shape
[8, batch_size, height, width, channels].
"""
# TODO(vighneshb) Minimize transposing here to save memory.
# input_tensor: [B, C, H, W]
input_tensor = tf.transpose(input_tensor, (0, 3, 1, 2))
# input_tensor: [B, C, H, W, 1]
input_tensor = input_tensor[:, :, :, :, tf.newaxis]
# input_tensor: [B * C, H, W, 1]
input_tensor, batch_size, channels = flatten_first2_dims(input_tensor)
kernel = _get_2d_neighbors_kernel()
# output: [B * C, H, W, 8]
output = tf.nn.atrous_conv2d(input_tensor, kernel, rate=dilation,
padding='SAME')
# output: [B, C, H, W, 8]
output = unpack_first2_dims(output, batch_size, channels)
# return: [8, B, H, W, C]
return tf.transpose(output, [4, 0, 2, 3, 1])
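# Illustrative sketch of the output layout: for an input of shape
# [B, H, W, C], the result has shape [8, B, H, W, C] and
# output[i, b, y, x, :] holds the feature at the neighboring location
# (y + dy * dilation, x + dx * dilation) with (dy, dx) = NEIGHBORS_2D[i];
# neighbors that fall outside the image are zero due to 'SAME' padding.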
def normalize_feature_map(feature_map):
return tf.math.l2_normalize(feature_map, axis=3, epsilon=1e-4)
def gaussian_pixel_similarity(a, b, theta):
norm_difference = tf.linalg.norm(a - b, axis=-1)
similarity = tf.exp(-norm_difference / theta)
return similarity
def dotprod_pixel_similarity(a, b):
return tf.reduce_sum(a * b, axis=-1)
def dilated_cross_pixel_similarity(feature_map, dilation=2, theta=2.0,
method='gaussian'):
"""Dilated cross pixel similarity.
method supports 2 values:
- 'gaussian': as defined in https://arxiv.org/abs/2012.02310
- 'dotprod': computes the dot product between feature vectors as the
similarity. This assumes that the features are normalized.
Args:
feature_map: A float tensor of shape [batch_size, height, width, channels]
dilation: int, the dilation factor.
theta: Divisor applied to the norm of the feature difference inside the
gaussian.
method: str, either 'gaussian' or 'dotprod'.
Returns:
dilated_similarity: A tensor of shape [8, batch_size, height, width]
"""
neighbors = generate_2d_neighbors(feature_map, dilation)
feature_map = feature_map[tf.newaxis]
if method == 'gaussian':
return gaussian_pixel_similarity(feature_map, neighbors, theta=theta)
elif method == 'dotprod':
return dotprod_pixel_similarity(feature_map, neighbors)
else:
raise ValueError('Unknown method for pixel sim %s' % method)
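# Illustrative sketch (assumed input): with the default 'gaussian' method,
#   sim = dilated_cross_pixel_similarity(tf.zeros((1, 8, 8, 3)))
# returns a [8, 1, 8, 8] tensor; because every feature (and the zero
# padding) is identical here, each entry equals exp(-0 / theta) = 1.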
def dilated_cross_same_mask_label(instance_masks, dilation=2):
"""Dilated cross pixel similarity as defined in [1].
[1]: https://arxiv.org/abs/2012.02310
Args:
instance_masks: A float tensor of shape [batch_size, num_instances,
height, width]
dilation: int, the dilation factor.
Returns:
dilated_same_label: A tensor of shape [8, batch_size, num_instances,
height, width]
"""
# instance_masks: [batch_size, height, width, num_instances]
instance_masks = tf.transpose(instance_masks, (0, 2, 3, 1))
# neighbors: [8, batch_size, height, width, num_instances]
neighbors = generate_2d_neighbors(instance_masks, dilation)
# instance_masks = [1, batch_size, height, width, num_instances]
instance_masks = instance_masks[tf.newaxis]
same_mask_prob = ((instance_masks * neighbors) +
((1 - instance_masks) * (1 - neighbors)))
return tf.transpose(same_mask_prob, (0, 1, 4, 2, 3))
def _per_pixel_single_conv(input_tensor, params, channels):
"""Convolve the given input with the given params.
Args:
input_tensor: A [num_instances, height, width, channels] shaped
float tensor.
params: A [num_instances, num_params] shaped float tensor.
channels: int, number of channels in the convolution.
Returns:
output: A float tensor of shape [num_instances, height, width, channels]
"""
input_channels = input_tensor.get_shape().as_list()[3]
weights = params[:, :(input_channels * channels)]
biases = params[:, (input_channels * channels):]
num_instances = tf.shape(params)[0]
weights = tf.reshape(weights, (num_instances, input_channels, channels))
output = (input_tensor[:, :, tf.newaxis, :] @
weights[:, tf.newaxis, tf.newaxis, :, :])
output = output[:, :, 0, :, :]
output = output + biases[:, tf.newaxis, tf.newaxis, :]
return output
def per_pixel_conditional_conv(input_tensor, parameters, channels, depth):
"""Use parameters perform per-pixel convolutions with the given depth [1].
[1]: https://arxiv.org/abs/2003.05664
Args:
input_tensor: float tensor of shape [num_instances, height,
width, input_channels]
parameters: A [num_instances, num_params] float tensor. If num_params
is incompatible with the given channels and depth, a ValueError will
be raised.
channels: int, the number of channels in the convolution.
depth: int, the number of layers of convolutions to perform.
Returns:
output: A [num_instances, height, width, 1] tensor with the conditional
conv applied according to each instance's parameters.
"""
input_channels = input_tensor.get_shape().as_list()[3]
num_params = parameters.get_shape().as_list()[1]
input_convs = 1 if depth > 1 else 0
intermediate_convs = depth - 2 if depth >= 2 else 0
expected_weights = ((input_channels * channels * input_convs) +
(channels * channels * intermediate_convs) +
channels) # final conv
expected_biases = (channels * (depth - 1)) + 1
if depth == 1:
if input_channels != channels:
raise ValueError(
'When depth=1, input_channels({}) should be equal to'.format(
input_channels) + ' channels({})'.format(channels))
if num_params != (expected_weights + expected_biases):
raise ValueError('Expected {} parameters at depth {}, but got {}'.format(
expected_weights + expected_biases, depth, num_params))
start = 0
output = input_tensor
for i in range(depth):
is_last_layer = i == (depth - 1)
if is_last_layer:
channels = 1
num_params_single_conv = channels * input_channels + channels
params = parameters[:, start:start + num_params_single_conv]
start += num_params_single_conv
output = _per_pixel_single_conv(output, params, channels)
if not is_last_layer:
output = tf.nn.relu(output)
input_channels = channels
return output
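# Illustrative parameter-count sketch (assumed sizes): with
# input_channels=16, channels=8 and depth=3 the layers consume
#   16 * 8 + 8 = 136 params (input conv weights + biases)
#    8 * 8 + 8 =  72 params (intermediate conv)
#    8 * 1 + 1 =   9 params (final 1-channel conv)
# so `parameters` must have shape [num_instances, 217] and the returned
# tensor has shape [num_instances, height, width, 1].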
def flip_boxes_left_right(boxes):
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=2)
return tf.stack(
[ymin, 1.0 - xmax, ymax, 1.0 - xmin], axis=2
)
def transform_images_and_boxes(images, boxes, tx, ty, sx, sy, flip):
"""Translate and scale a batch of images and boxes by the given amount.
The function first translates and then scales the image and assumes the
origin to be at the center of the image.
Args:
images: A [batch_size, height, width, 3] float tensor of images.
boxes: optional, A [batch_size, num_instances, 4] shaped float tensor of
normalized bounding boxes. If None, the second return value is always
None.
tx: A [batch_size] shaped float tensor of x translations.
ty: A [batch_size] shaped float tensor of y translations.
sx: A [batch_size] shaped float tensor of x scale factor.
sy: A [batch_size] shaped float tensor of y scale factor.
flip: A [batch_size] shaped bool tensor indicating whether or not we
flip the image.
Returns:
transformed_images: Transformed images of same shape as `images`.
transformed_boxes: If `boxes` was not None, transformed boxes of same
shape as boxes.
"""
_, height, width, _ = shape_utils.combined_static_and_dynamic_shape(
images)
flip_selector = tf.cast(flip, tf.float32)
flip_selector_4d = flip_selector[:, tf.newaxis, tf.newaxis, tf.newaxis]
flip_selector_3d = flip_selector[:, tf.newaxis, tf.newaxis]
flipped_images = tf.image.flip_left_right(images)
images = flipped_images * flip_selector_4d + (1.0 - flip_selector_4d) * images
cy = cx = tf.zeros_like(tx) + 0.5
ymin = -ty*sy + cy - sy * 0.5
xmin = -tx*sx + cx - sx * 0.5
ymax = -ty*sy + cy + sy * 0.5
xmax = -tx*sx + cx + sx * 0.5
crop_box = tf.stack([ymin, xmin, ymax, xmax], axis=1)
crop_box_expanded = crop_box[:, tf.newaxis, :]
images_transformed = spatial_transform_ops.matmul_crop_and_resize(
images, crop_box_expanded, (height, width)
)
images_transformed = images_transformed[:, 0, :, :, :]
if boxes is not None:
flipped_boxes = flip_boxes_left_right(boxes)
boxes = flipped_boxes * flip_selector_3d + (1.0 - flip_selector_3d) * boxes
win_height = ymax - ymin
win_width = xmax - xmin
win_height = win_height[:, tf.newaxis]
win_width = win_width[:, tf.newaxis]
boxes_transformed = (
boxes - tf.stack([ymin, xmin, ymin, xmin], axis=1)[:, tf.newaxis, :])
boxes_ymin, boxes_xmin, boxes_ymax, boxes_xmax = tf.unstack(
boxes_transformed, axis=2)
boxes_ymin *= 1.0 / win_height
boxes_xmin *= 1.0 / win_width
boxes_ymax *= 1.0 / win_height
boxes_xmax *= 1.0 / win_width
boxes = tf.stack([boxes_ymin, boxes_xmin, boxes_ymax, boxes_xmax], axis=2)
return images_transformed, boxes
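# Illustrative sketch (assumed inputs): the identity transform leaves images
# and boxes (approximately, up to bilinear resampling) unchanged.
#   images = tf.random.uniform((2, 32, 32, 3))
#   boxes = tf.constant([[[0.1, 0.2, 0.6, 0.8]]] * 2)
#   out_images, out_boxes = transform_images_and_boxes(
#       images, boxes, tx=tf.zeros(2), ty=tf.zeros(2),
#       sx=tf.ones(2), sy=tf.ones(2), flip=tf.zeros(2, dtype=tf.bool))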
def transform_instance_masks(instance_masks, tx, ty, sx, sy, flip):
"""Transforms a batch of instances by the given amount.
Args:
instance_masks: A [batch_size, num_instances, height, width, 3] float
tensor of instance masks.
tx: A [batch_size] shaped float tensor of x translations.
ty: A [batch_size] shaped float tensor of y translations.
sx: A [batch_size] shaped float tensor of x scale factor.
sy: A [batch_size] shaped float tensor of y scale factor.
flip: A [batch_size] shaped bool tensor indicating whether or not we
flip the image.
Returns:
transformed_instance_masks: Transformed instance masks of the same shape
as `instance_masks`.
"""
instance_masks, batch_size, num_instances = flatten_first2_dims(
instance_masks)
repeat = tf.zeros_like(tx, dtype=tf.int32) + num_instances
tx = tf.repeat(tx, repeat)
ty = tf.repeat(ty, repeat)
sx = tf.repeat(sx, repeat)
sy = tf.repeat(sy, repeat)
flip = tf.repeat(flip, repeat)
instance_masks = instance_masks[:, :, :, tf.newaxis]
instance_masks, _ = transform_images_and_boxes(
instance_masks, boxes=None, tx=tx, ty=ty, sx=sx, sy=sy, flip=flip)
return unpack_first2_dims(
instance_masks[:, :, :, 0], batch_size, num_instances)
class ResNetMaskNetwork(tf.keras.layers.Layer):
"""A small wrapper around ResNet blocks to predict masks."""
def __init__(self, resnet_type, num_init_channels):
"""Creates the ResNet mask network.
Args:
resnet_type: A string of the form resnetN where N is in
[4, 8, 12, 16, 20]
num_init_channels: Number of filters in the ResNet block.
"""
super(ResNetMaskNetwork, self).__init__()
nc = num_init_channels
if resnet_type == 'resnet4':
channel_dims = [nc * 2]
blocks = [2]
elif resnet_type == 'resnet8':
channel_dims = [nc * 2]
blocks = [4]
elif resnet_type == 'resnet12':
channel_dims = [nc * 2]
blocks = [6]
elif resnet_type == 'resnet16':
channel_dims = [nc * 2]
blocks = [8]
# Defined such that the channels are roughly similar to the hourglass20.
elif resnet_type == 'resnet20':
channel_dims = [nc * 2, nc * 3]
blocks = [8, 2]
else:
raise ValueError('Unknown resnet type "{}"'.format(resnet_type))
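    # For example, 'resnet4' with num_init_channels=8 builds a single stack of
    # two basic residual blocks with 16 filters; spatial resolution is
    # preserved because the stack below is constructed with stride1=1.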
self.input_layer = tf.keras.layers.Conv2D(nc, 1, 1)
# Last channel has to be defined so that batch norm can initialize properly.
model_input = tf.keras.layers.Input([None, None, nc])
output = model_input
for i, (num_blocks, channels) in enumerate(zip(blocks, channel_dims)):
output = resnet_v1.stack_basic(output, filters=channels,
blocks=num_blocks, stride1=1,
name='resnet_mask_block_%d' % i)
self.model = tf.keras.Model(inputs=model_input, outputs=output)
def __call__(self, inputs):
return self.model(self.input_layer(inputs))
class FullyConnectedMaskHead(tf.keras.layers.Layer):
"""A 2 layer fully connected mask head."""
def __init__(self, num_init_channels, mask_size):
super(FullyConnectedMaskHead, self).__init__()
self.fc1 = tf.keras.layers.Dense(units=1024, activation='relu')
self.fc2 = tf.keras.layers.Dense(units=mask_size*mask_size)
self.mask_size = mask_size
self.num_input_channels = num_init_channels
self.input_layer = tf.keras.layers.Conv2D(num_init_channels, 1, 1)
model_input = tf.keras.layers.Input(
[mask_size * mask_size * num_init_channels,])
output = self.fc2(self.fc1(model_input))
self.model = tf.keras.Model(inputs=model_input, outputs=output)
def __call__(self, inputs):
inputs = self.input_layer(inputs)
inputs_shape = tf.shape(inputs)
num_instances = inputs_shape[0]
height = inputs_shape[1]
width = inputs_shape[2]
dims = inputs_shape[3]
flattened_inputs = tf.reshape(inputs,
[num_instances, height * width * dims])
flattened_masks = self.model(flattened_inputs)
return tf.reshape(flattened_masks,
[num_instances, self.mask_size, self.mask_size, 1])
class DenseResidualBlock(tf.keras.layers.Layer):
"""Residual block for 1D inputs.
  This class implements the pre-activation version of the ResNet block.
"""
def __init__(self, hidden_size, use_shortcut_linear):
"""Residual Block for 1D inputs.
Args:
hidden_size: size of the hidden layer.
use_shortcut_linear: bool, whether or not to use a linear layer for
shortcut.
"""
super(DenseResidualBlock, self).__init__()
self.bn_0 = tf.keras.layers.experimental.SyncBatchNormalization(axis=-1)
self.bn_1 = tf.keras.layers.experimental.SyncBatchNormalization(axis=-1)
self.fc_0 = tf.keras.layers.Dense(
hidden_size, activation=None)
self.fc_1 = tf.keras.layers.Dense(
hidden_size, activation=None, kernel_initializer='zeros')
self.activation = tf.keras.layers.Activation('relu')
if use_shortcut_linear:
self.shortcut = tf.keras.layers.Dense(
hidden_size, activation=None, use_bias=False)
else:
self.shortcut = tf.keras.layers.Lambda(lambda x: x)
def __call__(self, inputs):
"""Layer's forward pass.
Args:
inputs: input tensor.
Returns:
      Tensor after the pre-activation residual block.
"""
out = self.fc_0(self.activation(self.bn_0(inputs)))
residual_inp = self.fc_1(self.activation(self.bn_1(out)))
skip = self.shortcut(inputs)
return residual_inp + skip
class DenseResNet(tf.keras.layers.Layer):
"""Resnet with dense layers."""
def __init__(self, num_layers, hidden_size, output_size):
"""Resnet with dense layers.
Args:
num_layers: int, the number of layers.
hidden_size: size of the hidden layer.
output_size: size of the output.
"""
super(DenseResNet, self).__init__()
self.input_proj = DenseResidualBlock(hidden_size, use_shortcut_linear=True)
if num_layers < 4:
raise ValueError(
'Cannot construct a DenseResNet with less than 4 layers')
num_blocks = (num_layers - 2) // 2
if ((num_blocks * 2) + 2) != num_layers:
raise ValueError(('DenseResNet depth has to be of the form (2n + 2). '
f'Found {num_layers}'))
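    # For example, num_layers=4 gives a single residual block between the
    # input projection and the output layer, while num_layers=20 gives 9.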
self._num_blocks = num_blocks
blocks = [DenseResidualBlock(hidden_size, use_shortcut_linear=False)
for _ in range(num_blocks)]
self.resnet = tf.keras.Sequential(blocks)
self.out_conv = tf.keras.layers.Dense(output_size)
def __call__(self, inputs):
net = self.input_proj(inputs)
return self.out_conv(self.resnet(net))
def _is_mask_head_param_free(name):
# Mask heads which don't have parameters of their own and instead rely
# on the instance embedding.
if name == 'embedding_projection' or name.startswith('cond_inst'):
return True
return False
class MaskHeadNetwork(tf.keras.layers.Layer):
"""Mask head class for DeepMAC."""
def __init__(self, network_type, num_init_channels=64,
use_instance_embedding=True, mask_size=None):
"""Initializes the network.
Args:
network_type: A string denoting the kind of network we want to use
internally.
num_init_channels: int, the number of channels in the first block. The
number of channels in the following blocks depend on the network type
used.
use_instance_embedding: bool, if set, we concatenate the instance
embedding to the input while predicting the mask.
mask_size: int, size of the output mask. Required only with
`fully_connected` mask type.
"""
super(MaskHeadNetwork, self).__init__()
self._net = _get_deepmac_network_by_type(
network_type, num_init_channels, mask_size)
self._use_instance_embedding = use_instance_embedding
self._network_type = network_type
self._num_init_channels = num_init_channels
if (self._use_instance_embedding and
(_is_mask_head_param_free(network_type))):
raise ValueError(('Cannot feed instance embedding to mask head when '
'mask-head has no parameters.'))
if _is_mask_head_param_free(network_type):
self.project_out = tf.keras.layers.Lambda(lambda x: x)
else:
self.project_out = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, activation=None)
def __call__(self, instance_embedding, pixel_embedding, training):
"""Returns mask logits given object center and spatial embeddings.
Args:
instance_embedding: A [num_instances, embedding_size] float tensor
        representing the center embedding vector of each instance.
pixel_embedding: A [num_instances, height, width, pixel_embedding_size]
float tensor representing the per-pixel spatial embedding for each
instance.
training: boolean flag indicating training or testing mode.
Returns:
mask: A [num_instances, height, width] float tensor containing the mask
logits for each instance.
"""
height = tf.shape(pixel_embedding)[1]
width = tf.shape(pixel_embedding)[2]
if self._use_instance_embedding:
instance_embedding = instance_embedding[:, tf.newaxis, tf.newaxis, :]
instance_embedding = tf.tile(instance_embedding, [1, height, width, 1])
inputs = tf.concat([pixel_embedding, instance_embedding], axis=3)
else:
inputs = pixel_embedding
out = self._net(inputs)
if isinstance(out, list):
out = out[-1]
if self._network_type == 'embedding_projection':
instance_embedding = instance_embedding[:, tf.newaxis, tf.newaxis, :]
out = embedding_projection(instance_embedding, out)
elif self._network_type.startswith('cond_inst'):
depth = int(self._network_type.lstrip('cond_inst'))
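      # e.g. 'cond_inst3' yields depth=3. Note that str.lstrip strips leading
      # characters drawn from the set {c, o, n, d, _, i, s, t}, which leaves
      # the numeric suffix intact here.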
out = per_pixel_conditional_conv(out, instance_embedding,
self._num_init_channels, depth)
if out.shape[-1] > 1:
out = self.project_out(out)
return tf.squeeze(out, axis=-1)
def _batch_gt_list(gt_list):
return tf.stack(gt_list, axis=0)
def deepmac_proto_to_params(deepmac_config):
"""Convert proto to named tuple."""
loss = losses_pb2.Loss()
# Add dummy localization loss to avoid the loss_builder throwing error.
loss.localization_loss.weighted_l2.CopyFrom(
losses_pb2.WeightedL2LocalizationLoss())
loss.classification_loss.CopyFrom(deepmac_config.classification_loss)
classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss))
deepmac_field_class = (
center_net_pb2.CenterNet.DESCRIPTOR.nested_types_by_name[
'DeepMACMaskEstimation'])
params = {}
for field in deepmac_field_class.fields:
value = getattr(deepmac_config, field.name)
if field.enum_type:
params[field.name] = field.enum_type.values_by_number[value].name.lower()
else:
params[field.name] = value
params['roi_jitter_mode'] = params.pop('jitter_mode')
params['classification_loss'] = classification_loss
return DeepMACParams(**params)
def _warmup_weight(current_training_step, warmup_start, warmup_steps):
"""Utility function for warming up loss weights."""
if warmup_steps == 0:
return 1.0
training_step = tf.cast(current_training_step, tf.float32)
warmup_steps = tf.cast(warmup_steps, tf.float32)
start_step = tf.cast(warmup_start, tf.float32)
warmup_weight = (training_step - start_step) / warmup_steps
warmup_weight = tf.clip_by_value(warmup_weight, 0.0, 1.0)
return warmup_weight
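# For example, _warmup_weight(step, warmup_start=1000, warmup_steps=2000)
# ramps linearly from 0.0 at step 1000 to 1.0 at step 3000, stays at 1.0
# afterwards, and is 0.0 for steps before warmup_start.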
class DeepMACMetaArch(center_net_meta_arch.CenterNetMetaArch):
"""The experimental CenterNet DeepMAC[1] model.
[1]: https://arxiv.org/abs/2104.00613
"""
def __init__(self,
is_training,
add_summaries,
num_classes,
feature_extractor,
image_resizer_fn,
object_center_params,
object_detection_params,
deepmac_params: DeepMACParams,
compute_heatmap_sparse=False):
"""Constructs the super class with object center & detection params only."""
self._deepmac_params = deepmac_params
if (self._deepmac_params.predict_full_resolution_masks and
self._deepmac_params.max_roi_jitter_ratio > 0.0):
raise ValueError('Jittering is not supported for full res masks.')
if self._deepmac_params.mask_num_subsamples > 0:
raise ValueError('Subsampling masks is currently not supported.')
if self._deepmac_params.network_type == 'embedding_projection':
if self._deepmac_params.use_xy:
raise ValueError(
'Cannot use x/y coordinates when using embedding projection.')
pixel_embedding_dim = self._deepmac_params.pixel_embedding_dim
dim = self._deepmac_params.dim
if dim != pixel_embedding_dim:
raise ValueError(
'When using embedding projection mask head, '
f'pixel_embedding_dim({pixel_embedding_dim}) '
f'must be same as dim({dim}).')
generator_class = tf.random.Generator
self._self_supervised_rng = generator_class.from_non_deterministic_state()
super(DeepMACMetaArch, self).__init__(
is_training=is_training, add_summaries=add_summaries,
num_classes=num_classes, feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=object_center_params,
object_detection_params=object_detection_params,
compute_heatmap_sparse=compute_heatmap_sparse)
def _construct_prediction_heads(self, num_classes, num_feature_outputs,
class_prediction_bias_init):
super_instance = super(DeepMACMetaArch, self)
prediction_heads = super_instance._construct_prediction_heads( # pylint:disable=protected-access
num_classes, num_feature_outputs, class_prediction_bias_init)
if self._deepmac_params is not None:
prediction_heads[INSTANCE_EMBEDDING] = [
center_net_meta_arch.make_prediction_net(self._deepmac_params.dim)
for _ in range(num_feature_outputs)
]
prediction_heads[PIXEL_EMBEDDING] = [
center_net_meta_arch.make_prediction_net(
self._deepmac_params.pixel_embedding_dim)
for _ in range(num_feature_outputs)
]
self._mask_net = MaskHeadNetwork(
network_type=self._deepmac_params.network_type,
use_instance_embedding=self._deepmac_params.use_instance_embedding,
num_init_channels=self._deepmac_params.num_init_channels)
return prediction_heads
def _get_mask_head_input(self, boxes, pixel_embedding):
"""Get the input to the mask network, given bounding boxes.
Args:
boxes: A [batch_size, num_instances, 4] float tensor containing bounding
boxes in normalized coordinates.
pixel_embedding: A [batch_size, height, width, embedding_size] float
tensor containing spatial pixel embeddings.
Returns:
embedding: A [batch_size, num_instances, mask_height, mask_width,
embedding_size + 2] float tensor containing the inputs to the mask
network. For each bounding box, we concatenate the normalized box
coordinates to the cropped pixel embeddings. If
predict_full_resolution_masks is set, mask_height and mask_width are
the same as height and width of pixel_embedding. If not, mask_height
and mask_width are the same as mask_size.
"""
batch_size, num_instances = tf.shape(boxes)[0], tf.shape(boxes)[1]
mask_size = self._deepmac_params.mask_size
if self._deepmac_params.predict_full_resolution_masks:
num_instances = tf.shape(boxes)[1]
pixel_embedding = pixel_embedding[:, tf.newaxis, :, :, :]
pixel_embeddings_processed = tf.tile(pixel_embedding,
[1, num_instances, 1, 1, 1])
image_shape = tf.shape(pixel_embeddings_processed)
image_height, image_width = image_shape[2], image_shape[3]
y_grid, x_grid = tf.meshgrid(tf.linspace(0.0, 1.0, image_height),
tf.linspace(0.0, 1.0, image_width),
indexing='ij')
ycenter = (boxes[:, :, 0] + boxes[:, :, 2]) / 2.0
xcenter = (boxes[:, :, 1] + boxes[:, :, 3]) / 2.0
y_grid = y_grid[tf.newaxis, tf.newaxis, :, :]
x_grid = x_grid[tf.newaxis, tf.newaxis, :, :]
y_grid -= ycenter[:, :, tf.newaxis, tf.newaxis]
x_grid -= xcenter[:, :, tf.newaxis, tf.newaxis]
coords = tf.stack([y_grid, x_grid], axis=4)
else:
# TODO(vighneshb) Explore multilevel_roi_align and align_corners=False.
embeddings = spatial_transform_ops.matmul_crop_and_resize(
pixel_embedding, boxes, [mask_size, mask_size])
pixel_embeddings_processed = embeddings
mask_shape = tf.shape(pixel_embeddings_processed)
mask_height, mask_width = mask_shape[2], mask_shape[3]
y_grid, x_grid = tf.meshgrid(tf.linspace(-1.0, 1.0, mask_height),
tf.linspace(-1.0, 1.0, mask_width),
indexing='ij')
coords = tf.stack([y_grid, x_grid], axis=2)
coords = coords[tf.newaxis, tf.newaxis, :, :, :]
coords = tf.tile(coords, [batch_size, num_instances, 1, 1, 1])
if self._deepmac_params.use_xy:
return tf.concat([coords, pixel_embeddings_processed], axis=4)
else:
return pixel_embeddings_processed
def _get_instance_embeddings(self, boxes, instance_embedding):
"""Return the instance embeddings from bounding box centers.
Args:
boxes: A [batch_size, num_instances, 4] float tensor holding bounding
boxes. The coordinates are in normalized input space.
instance_embedding: A [batch_size, height, width, embedding_size] float
tensor containing the instance embeddings.
Returns:
instance_embeddings: A [batch_size, num_instances, embedding_size]
shaped float tensor containing the center embedding for each instance.
"""
output_height = tf.cast(tf.shape(instance_embedding)[1], tf.float32)
output_width = tf.cast(tf.shape(instance_embedding)[2], tf.float32)
ymin = boxes[:, :, 0]
xmin = boxes[:, :, 1]
ymax = boxes[:, :, 2]
xmax = boxes[:, :, 3]
y_center_output = (ymin + ymax) * output_height / 2.0
x_center_output = (xmin + xmax) * output_width / 2.0
center_coords_output = tf.stack([y_center_output, x_center_output], axis=2)
center_coords_output_int = tf.cast(center_coords_output, tf.int32)
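    # Look up one embedding vector per instance at the (integer) box-center
    # location in the feature map; batch_dims=1 keeps the lookup per image.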
center_latents = tf.gather_nd(instance_embedding, center_coords_output_int,
batch_dims=1)
return center_latents
def predict(self, preprocessed_inputs, true_image_shapes):
prediction_dict = super(DeepMACMetaArch, self).predict(
preprocessed_inputs, true_image_shapes)
if self.groundtruth_has_field(fields.BoxListFields.boxes):
mask_logits = self._predict_mask_logits_from_gt_boxes(prediction_dict)
prediction_dict[MASK_LOGITS_GT_BOXES] = mask_logits
if self._deepmac_params.augmented_self_supervision_loss_weight > 0.0:
prediction_dict[SELF_SUPERVISED_DEAUGMENTED_MASK_LOGITS] = (
self._predict_deaugmented_mask_logits_on_augmented_inputs(
preprocessed_inputs, true_image_shapes))
return prediction_dict
def _predict_deaugmented_mask_logits_on_augmented_inputs(
self, preprocessed_inputs, true_image_shapes):
"""Predicts masks on augmented images and reverses that augmentation.
The masks are de-augmented so that they are aligned with the original image.
Args:
preprocessed_inputs: A batch of images of shape
[batch_size, height, width, 3].
true_image_shapes: True shape of the image in case there is any padding.
Returns:
      mask_logits: A list of float tensors of shape [batch_size, num_instances,
        output_height, output_width] containing de-augmented mask logits that
        are aligned with the original (un-augmented) inputs, one per
        prediction stage.
"""
batch_size = tf.shape(preprocessed_inputs)[0]
gt_boxes = _batch_gt_list(
self.groundtruth_lists(fields.BoxListFields.boxes))
max_t = self._deepmac_params.augmented_self_supervision_max_translation
tx = self._self_supervised_rng.uniform(
[batch_size], minval=-max_t, maxval=max_t)
ty = self._self_supervised_rng.uniform(
[batch_size], minval=-max_t, maxval=max_t)
scale_min = self._deepmac_params.augmented_self_supervision_scale_min
scale_max = self._deepmac_params.augmented_self_supervision_scale_max
sx = self._self_supervised_rng.uniform([batch_size], minval=scale_min,
maxval=scale_max)
sy = self._self_supervised_rng.uniform([batch_size], minval=scale_min,
maxval=scale_max)
flip = (self._self_supervised_rng.uniform(
[batch_size], minval=0.0, maxval=1.0) <
self._deepmac_params.augmented_self_supervision_flip_probability)
augmented_inputs, augmented_boxes = transform_images_and_boxes(
preprocessed_inputs, gt_boxes, tx=tx, ty=ty, sx=sx, sy=sy, flip=flip
)
augmented_prediction_dict = super(DeepMACMetaArch, self).predict(
augmented_inputs, true_image_shapes)
augmented_masks_lists = self._predict_mask_logits_from_boxes(
augmented_prediction_dict, augmented_boxes)
deaugmented_masks_list = []
for mask_logits in augmented_masks_lists:
deaugmented_masks = transform_instance_masks(
mask_logits, tx=-tx, ty=-ty, sx=1.0/sx, sy=1.0/sy, flip=flip)
deaugmented_masks = tf.stop_gradient(deaugmented_masks)
deaugmented_masks_list.append(deaugmented_masks)
return deaugmented_masks_list
def _predict_mask_logits_from_embeddings(
self, pixel_embedding, instance_embedding, boxes):
mask_input = self._get_mask_head_input(boxes, pixel_embedding)
mask_input, batch_size, num_instances = flatten_first2_dims(mask_input)
instance_embeddings = self._get_instance_embeddings(
boxes, instance_embedding)
instance_embeddings, _, _ = flatten_first2_dims(instance_embeddings)
mask_logits = self._mask_net(
instance_embeddings, mask_input,
training=tf.keras.backend.learning_phase())
mask_logits = unpack_first2_dims(
mask_logits, batch_size, num_instances)
return mask_logits
def _predict_mask_logits_from_boxes(self, prediction_dict, boxes):
"""Predict mask logits using the predict dict and the given set of boxes.
Args:
prediction_dict: a dict containing the keys INSTANCE_EMBEDDING and
PIXEL_EMBEDDING, both expected to be list of tensors.
boxes: A [batch_size, num_instances, 4] float tensor of boxes in the
normalized coordinate system.
Returns:
mask_logits_list: A list of mask logits with the same spatial extents
as prediction_dict[PIXEL_EMBEDDING].
"""
mask_logits_list = []
instance_embedding_list = prediction_dict[INSTANCE_EMBEDDING]
pixel_embedding_list = prediction_dict[PIXEL_EMBEDDING]
if self._deepmac_params.use_only_last_stage:
instance_embedding_list = [instance_embedding_list[-1]]
pixel_embedding_list = [pixel_embedding_list[-1]]
for (instance_embedding, pixel_embedding) in zip(instance_embedding_list,
pixel_embedding_list):
mask_logits_list.append(
self._predict_mask_logits_from_embeddings(
pixel_embedding, instance_embedding, boxes))
return mask_logits_list
def _predict_mask_logits_from_gt_boxes(self, prediction_dict):
return self._predict_mask_logits_from_boxes(
prediction_dict,
_batch_gt_list(self.groundtruth_lists(fields.BoxListFields.boxes)))
def _get_groundtruth_mask_output(self, boxes, masks):
"""Get the expected mask output for each box.
Args:
boxes: A [batch_size, num_instances, 4] float tensor containing bounding
boxes in normalized coordinates.
masks: A [batch_size, num_instances, height, width] float tensor
containing binary ground truth masks.
Returns:
masks: If predict_full_resolution_masks is set, masks are not resized
and the size of this tensor is [batch_size, num_instances,
input_height, input_width]. Otherwise, returns a tensor of size
[batch_size, num_instances, mask_size, mask_size].
"""
mask_size = self._deepmac_params.mask_size
if self._deepmac_params.predict_full_resolution_masks:
return masks
else:
cropped_masks = crop_and_resize_instance_masks(
masks, boxes, mask_size)
cropped_masks = tf.stop_gradient(cropped_masks)
# TODO(vighneshb) should we discretize masks?
return cropped_masks
def _resize_logits_like_gt(self, logits, gt):
height, width = tf.shape(gt)[2], tf.shape(gt)[3]
return resize_instance_masks(logits, (height, width))
def _aggregate_classification_loss(self, loss, gt, pred, method):
"""Aggregates loss at a per-instance level.
When this function is used with mask-heads, num_classes is usually 1.
Args:
loss: A [num_instances, num_pixels, num_classes] or
[num_instances, num_classes] tensor. If the tensor is of rank 2, i.e.,
of the form [num_instances, num_classes], we will assume that the
        number of pixels has already been normalized.
gt: A [num_instances, num_pixels, num_classes] float tensor of
groundtruths.
pred: A [num_instances, num_pixels, num_classes] float tensor of
        predictions.
      method: A string in ['normalize_auto', 'normalize_groundtruth_count',
        'normalize_balanced'].
        'normalize_auto': When `loss` is rank 2, aggregates by sum. Otherwise,
          aggregates by mean.
        'normalize_groundtruth_count': Aggregates the loss by computing the sum
          and dividing by the number of positive (1) groundtruth pixels.
        'normalize_balanced': Normalizes each pixel by the number of positive
          or negative pixels depending on the groundtruth.
Returns:
per_instance_loss: A [num_instances] float tensor.
"""
rank = len(loss.get_shape().as_list())
if rank == 2:
axes = [1]
else:
axes = [1, 2]
if method == 'normalize_auto':
normalization = 1.0
if rank == 2:
return tf.reduce_sum(loss, axis=axes)
else:
return tf.reduce_mean(loss, axis=axes)
elif method == 'normalize_groundtruth_count':
normalization = tf.reduce_sum(gt, axis=axes)
return tf.reduce_sum(loss, axis=axes) / normalization
elif method == 'normalize_balanced':
if rank != 3:
        raise ValueError('Cannot apply normalize_balanced aggregation '
f'to loss of rank {rank}')
normalization = (
(gt * tf.reduce_sum(gt, keepdims=True, axis=axes)) +
(1 - gt) * tf.reduce_sum(1 - gt, keepdims=True, axis=axes))
return tf.reduce_sum(loss / normalization, axis=axes)
else:
raise ValueError('Unknown loss aggregation - {}'.format(method))
def _compute_mask_prediction_loss(
self, boxes, mask_logits, mask_gt, classes):
"""Compute the per-instance mask loss.
Args:
boxes: A [batch_size, num_instances, 4] float tensor of GT boxes in
normalized coordinates.
mask_logits: A [batch_size, num_instances, height, width] float tensor of
predicted masks
mask_gt: The groundtruth mask of same shape as mask_logits.
classes: A [batch_size, num_instances, num_classes] shaped tensor of
one-hot encoded classes.
Returns:
loss: A [batch_size, num_instances] shaped tensor with the loss for each
instance.
"""
if mask_gt is None:
logging.info('No mask GT provided, mask loss is 0.')
return tf.zeros_like(boxes[:, :, 0])
batch_size, num_instances = tf.shape(boxes)[0], tf.shape(boxes)[1]
mask_logits = self._resize_logits_like_gt(mask_logits, mask_gt)
height, width = tf.shape(mask_logits)[2], tf.shape(mask_logits)[3]
if self._deepmac_params.ignore_per_class_box_overlap:
mask_logits *= per_instance_no_class_overlap(
classes, boxes, height, width)
      height, width = tf.shape(mask_gt)[2], tf.shape(mask_gt)[3]
      mask_logits *= per_instance_no_class_overlap(
          classes, boxes, height, width)
mask_logits = tf.reshape(mask_logits, [batch_size * num_instances, -1, 1])
mask_gt = tf.reshape(mask_gt, [batch_size * num_instances, -1, 1])
loss = self._deepmac_params.classification_loss(
prediction_tensor=mask_logits,
target_tensor=mask_gt,
weights=tf.ones_like(mask_logits))
loss = self._aggregate_classification_loss(
loss, mask_gt, mask_logits, 'normalize_auto')
return tf.reshape(loss, [batch_size, num_instances])
def _compute_box_consistency_loss(
self, boxes_gt, boxes_for_crop, mask_logits):
"""Compute the per-instance box consistency loss.
Args:
boxes_gt: A [batch_size, num_instances, 4] float tensor of GT boxes.
boxes_for_crop: A [batch_size, num_instances, 4] float tensor of
augmented boxes, to be used when using crop-and-resize based mask head.
mask_logits: A [batch_size, num_instances, height, width]
float tensor of predicted masks.
Returns:
loss: A [batch_size, num_instances] shaped tensor with the loss for
each instance in the batch.
"""
shape = tf.shape(mask_logits)
batch_size, num_instances, height, width = (
shape[0], shape[1], shape[2], shape[3])
filled_boxes = fill_boxes(boxes_gt, height, width)[:, :, :, :, tf.newaxis]
mask_logits = mask_logits[:, :, :, :, tf.newaxis]
if self._deepmac_params.predict_full_resolution_masks:
gt_crop = filled_boxes[:, :, :, :, 0]
pred_crop = mask_logits[:, :, :, :, 0]
else:
gt_crop = crop_and_resize_instance_masks(
filled_boxes, boxes_for_crop, self._deepmac_params.mask_size)
pred_crop = crop_and_resize_instance_masks(
mask_logits, boxes_for_crop, self._deepmac_params.mask_size)
loss = 0.0
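    # Box consistency: max-project both the filled groundtruth box and the
    # predicted mask onto the y and x axes (axis 2 and 3), then penalize
    # disagreement between the two 1-D profiles with the classification loss.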
for axis in [2, 3]:
if self._deepmac_params.box_consistency_tightness:
pred_max_raw = tf.reduce_max(pred_crop, axis=axis)
pred_max_within_box = tf.reduce_max(pred_crop * gt_crop, axis=axis)
box_1d = tf.reduce_max(gt_crop, axis=axis)
pred_max = ((box_1d * pred_max_within_box) +
((1 - box_1d) * pred_max_raw))
else:
pred_max = tf.reduce_max(pred_crop, axis=axis)
pred_max = pred_max[:, :, :, tf.newaxis]
gt_max = tf.reduce_max(gt_crop, axis=axis)[:, :, :, tf.newaxis]
flat_pred, batch_size, num_instances = flatten_first2_dims(pred_max)
flat_gt, _, _ = flatten_first2_dims(gt_max)
# We use flat tensors while calling loss functions because we
# want the loss per-instance to later multiply with the per-instance
# weight. Flattening the first 2 dims allows us to represent each instance
# in each batch as though they were samples in a larger batch.
raw_loss = self._deepmac_params.classification_loss(
prediction_tensor=flat_pred,
target_tensor=flat_gt,
weights=tf.ones_like(flat_pred))
agg_loss = self._aggregate_classification_loss(
raw_loss, flat_gt, flat_pred,
self._deepmac_params.box_consistency_loss_normalize)
loss += unpack_first2_dims(agg_loss, batch_size, num_instances)
return loss
def _compute_feature_consistency_loss(
self, boxes, consistency_feature_map, mask_logits):
"""Compute the per-instance feature consistency loss.
Args:
boxes: A [batch_size, num_instances, 4] float tensor of GT boxes.
consistency_feature_map: A [batch_size, height, width, 3]
float tensor containing the feature map to use for consistency.
mask_logits: A [batch_size, num_instances, height, width] float tensor of
predicted masks.
Returns:
loss: A [batch_size, num_instances] shaped tensor with the loss for each
        instance for each sample in the batch.
"""
if not self._deepmac_params.predict_full_resolution_masks:
      logging.info('Feature consistency is not implemented with RoIAlign, '
                   'i.e., fixed-size masks. Returning 0 loss.')
return tf.zeros(tf.shape(boxes)[:2])
dilation = self._deepmac_params.feature_consistency_dilation
height, width = (tf.shape(consistency_feature_map)[1],
tf.shape(consistency_feature_map)[2])
comparison = self._deepmac_params.feature_consistency_comparison
if comparison == 'comparison_default_gaussian':
similarity = dilated_cross_pixel_similarity(
consistency_feature_map, dilation=dilation, theta=2.0,
method='gaussian')
elif comparison == 'comparison_normalized_dotprod':
consistency_feature_map = normalize_feature_map(consistency_feature_map)
similarity = dilated_cross_pixel_similarity(
consistency_feature_map, dilation=dilation, theta=2.0,
method='dotprod')
else:
raise ValueError('Unknown comparison type - %s' % comparison)
mask_probs = tf.nn.sigmoid(mask_logits)
same_mask_label_probability = dilated_cross_same_mask_label(
mask_probs, dilation=dilation)
same_mask_label_probability = tf.clip_by_value(
same_mask_label_probability, 1e-3, 1.0)
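    # Pixels whose features are similar (similarity above the configured
    # threshold) are encouraged to take the same mask label: the loss is the
    # negative log-probability of neighboring pixels agreeing, restricted
    # below to a slightly expanded box region.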
similarity_mask = (
similarity > self._deepmac_params.feature_consistency_threshold)
similarity_mask = tf.cast(
similarity_mask[:, :, tf.newaxis, :, :], tf.float32)
per_pixel_loss = -(similarity_mask *
tf.math.log(same_mask_label_probability))
# TODO(vighneshb) explore if shrinking the box by 1px helps.
box_mask = fill_boxes(boxes, height, width, expand=2)
box_mask_expanded = box_mask[tf.newaxis]
per_pixel_loss = per_pixel_loss * box_mask_expanded
loss = tf.reduce_sum(per_pixel_loss, axis=[0, 3, 4])
num_box_pixels = tf.maximum(1.0, tf.reduce_sum(box_mask, axis=[2, 3]))
loss = loss / num_box_pixels
if tf.keras.backend.learning_phase():
loss *= _warmup_weight(
current_training_step=self._training_step,
warmup_start=self._deepmac_params.feature_consistency_warmup_start,
warmup_steps=self._deepmac_params.feature_consistency_warmup_steps)
return loss
def _self_supervision_loss(
self, predicted_logits, self_supervised_logits, boxes, loss_name):
original_shape = tf.shape(predicted_logits)
batch_size, num_instances = original_shape[0], original_shape[1]
box_mask = fill_boxes(boxes, original_shape[2], original_shape[3])
loss_tensor_shape = [batch_size * num_instances, -1, 1]
weights = tf.reshape(box_mask, loss_tensor_shape)
predicted_logits = tf.reshape(predicted_logits, loss_tensor_shape)
self_supervised_logits = tf.reshape(self_supervised_logits,
loss_tensor_shape)
self_supervised_probs = tf.nn.sigmoid(self_supervised_logits)
predicted_probs = tf.nn.sigmoid(predicted_logits)
num_box_pixels = tf.reduce_sum(weights, axis=[1, 2])
num_box_pixels = tf.maximum(num_box_pixels, 1.0)
if loss_name == 'loss_dice':
self_supervised_binary_probs = tf.cast(
self_supervised_logits > 0.0, tf.float32)
loss_class = losses.WeightedDiceClassificationLoss(
squared_normalization=False)
loss = loss_class(prediction_tensor=predicted_logits,
target_tensor=self_supervised_binary_probs,
weights=weights)
agg_loss = self._aggregate_classification_loss(
loss, gt=self_supervised_probs, pred=predicted_logits,
method='normalize_auto')
elif loss_name == 'loss_mse':
diff = self_supervised_probs - predicted_probs
diff_sq = (diff * diff)
diff_sq_sum = tf.reduce_sum(diff_sq * weights, axis=[1, 2])
agg_loss = diff_sq_sum / num_box_pixels
elif loss_name == 'loss_kl_div':
loss_class = tf.keras.losses.KLDivergence(
reduction=tf.keras.losses.Reduction.NONE)
predicted_2class_probability = tf.stack(
[predicted_probs, 1 - predicted_probs], axis=2
)
target_2class_probability = tf.stack(
[self_supervised_probs, 1 - self_supervised_probs], axis=2
)
loss = loss_class(
y_pred=predicted_2class_probability,
y_true=target_2class_probability)
agg_loss = tf.reduce_sum(loss * weights, axis=[1, 2]) / num_box_pixels
else:
raise RuntimeError('Unknown self-supervision loss %s' % loss_name)
return tf.reshape(agg_loss, [batch_size, num_instances])
def _compute_self_supervised_augmented_loss(
self, original_logits, deaugmented_logits, boxes):
if deaugmented_logits is None:
      logging.info('No self-supervised masks provided. '
                   'Returning 0 self-supervised loss.')
return tf.zeros(tf.shape(original_logits)[:2])
loss = self._self_supervision_loss(
predicted_logits=original_logits,
self_supervised_logits=deaugmented_logits,
boxes=boxes,
loss_name=self._deepmac_params.augmented_self_supervision_loss)
if tf.keras.backend.learning_phase():
loss *= _warmup_weight(
current_training_step=self._training_step,
warmup_start=
self._deepmac_params.augmented_self_supervision_warmup_start,
warmup_steps=
self._deepmac_params.augmented_self_supervision_warmup_steps)
return loss
def _compute_pointly_supervised_loss_from_keypoints(
self, mask_logits, keypoints_gt, keypoints_depth_gt):
"""Computes per-point mask loss from keypoints.
Args:
mask_logits: A [batch_size, num_instances, height, width] float tensor
denoting predicted masks.
keypoints_gt: A [batch_size, num_instances, num_keypoints, 2] float tensor
        of normalized keypoint coordinates.
      keypoints_depth_gt: A [batch_size, num_instances, num_keypoints] float
tensor of keypoint depths. We assume that +1 is foreground and -1
is background.
Returns:
loss: Pointly supervised loss with shape [batch_size, num_instances].
"""
if keypoints_gt is None:
logging.info(('Returning 0 pointly supervised loss because '
'keypoints are not given.'))
return tf.zeros(tf.shape(mask_logits)[:2])
if keypoints_depth_gt is None:
logging.info(('Returning 0 pointly supervised loss because '
'keypoint depths are not given.'))
return tf.zeros(tf.shape(mask_logits)[:2])
if not self._deepmac_params.predict_full_resolution_masks:
raise NotImplementedError(
'Pointly supervised loss not implemented with RoIAlign.')
num_keypoints = tf.shape(keypoints_gt)[2]
keypoints_nan = tf.math.is_nan(keypoints_gt)
keypoints_gt = tf.where(
keypoints_nan, tf.zeros_like(keypoints_gt), keypoints_gt)
weights = tf.cast(
tf.logical_not(tf.reduce_any(keypoints_nan, axis=3)), tf.float32)
height, width = tf.shape(mask_logits)[2], tf.shape(mask_logits)[3]
ky, kx = tf.unstack(keypoints_gt, axis=3)
height_f, width_f = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
ky = tf.clip_by_value(tf.cast(ky * height_f, tf.int32), 0, height - 1)
kx = tf.clip_by_value(tf.cast(kx * width_f, tf.int32), 0, width - 1)
keypoints_gt_int = tf.stack([ky, kx], axis=3)
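    # Normalized keypoint coordinates are converted to integer (row, col)
    # indices into the mask logit grid so that logits can be gathered at the
    # labeled points below.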
mask_logits_flat, batch_size, num_instances = flatten_first2_dims(
mask_logits)
keypoints_gt_int_flat, _, _ = flatten_first2_dims(keypoints_gt_int)
keypoint_depths_flat, _, _ = flatten_first2_dims(keypoints_depth_gt)
    weights_flat, _, _ = flatten_first2_dims(weights)
# TODO(vighneshb): Replace with bilinear interpolation
point_mask_logits = tf.gather_nd(
mask_logits_flat, keypoints_gt_int_flat, batch_dims=1)
point_mask_logits = tf.reshape(
point_mask_logits, [batch_size * num_instances, num_keypoints, 1])
labels = tf.cast(keypoint_depths_flat > 0.0, tf.float32)
labels = tf.reshape(
labels, [batch_size * num_instances, num_keypoints, 1])
weights_flat = tf.reshape(
weights_flat, [batch_size * num_instances, num_keypoints, 1])
loss = self._deepmac_params.classification_loss(
prediction_tensor=point_mask_logits, target_tensor=labels,
weights=weights_flat
)
loss = self._aggregate_classification_loss(
loss, gt=labels, pred=point_mask_logits, method='normalize_auto')
return tf.reshape(loss, [batch_size, num_instances])
def _compute_deepmac_losses(
self, boxes, masks_logits, masks_gt, classes, consistency_feature_map,
self_supervised_masks_logits=None, keypoints_gt=None,
keypoints_depth_gt=None):
"""Returns the mask loss per instance.
Args:
boxes: A [batch_size, num_instances, 4] float tensor holding bounding
boxes. The coordinates are in normalized input space.
      masks_logits: A [batch_size, num_instances, output_height, output_width]
float tensor containing the instance mask predictions in their logit
form.
masks_gt: A [batch_size, num_instances, output_height, output_width] float
tensor containing the groundtruth masks. If masks_gt is None,
DEEP_MASK_ESTIMATION is filled with 0s.
classes: A [batch_size, num_instances, num_classes] tensor of one-hot
encoded classes.
consistency_feature_map: [batch_size, output_height, output_width,
channels] float tensor denoting the image to use for consistency.
self_supervised_masks_logits: Optional self-supervised mask logits to
compare against of same shape as mask_logits.
keypoints_gt: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2], representing the points
where we have mask supervision.
keypoints_depth_gt: A float tensor of shape
[batch_size, num_instances, num_keypoints] of keypoint depths which
indicate the mask label at the keypoint locations. depth=+1 is
foreground and depth=-1 is background.
Returns:
      tensor_dict: A dictionary with 5 keys, each mapping to a tensor of shape
        [batch_size, num_instances]. The 5 keys are:
- DEEP_MASK_ESTIMATION
- DEEP_MASK_BOX_CONSISTENCY
- DEEP_MASK_FEATURE_CONSISTENCY
- DEEP_MASK_AUGMENTED_SELF_SUPERVISION
- DEEP_MASK_POINTLY_SUPERVISED
"""
if tf.keras.backend.learning_phase():
boxes = tf.stop_gradient(boxes)
def jitter_func(boxes):
return preprocessor.random_jitter_boxes(
boxes, self._deepmac_params.max_roi_jitter_ratio,
jitter_mode=self._deepmac_params.roi_jitter_mode)
boxes_for_crop = tf.map_fn(jitter_func,
boxes, parallel_iterations=128)
else:
boxes_for_crop = boxes
if masks_gt is not None:
masks_gt = self._get_groundtruth_mask_output(
boxes_for_crop, masks_gt)
mask_prediction_loss = self._compute_mask_prediction_loss(
boxes_for_crop, masks_logits, masks_gt, classes)
box_consistency_loss = self._compute_box_consistency_loss(
boxes, boxes_for_crop, masks_logits)
feature_consistency_loss = self._compute_feature_consistency_loss(
boxes, consistency_feature_map, masks_logits)
self_supervised_loss = self._compute_self_supervised_augmented_loss(
masks_logits, self_supervised_masks_logits, boxes,
)
pointly_supervised_loss = (
self._compute_pointly_supervised_loss_from_keypoints(
masks_logits, keypoints_gt, keypoints_depth_gt))
return {
DEEP_MASK_ESTIMATION: mask_prediction_loss,
DEEP_MASK_BOX_CONSISTENCY: box_consistency_loss,
DEEP_MASK_FEATURE_CONSISTENCY: feature_consistency_loss,
DEEP_MASK_AUGMENTED_SELF_SUPERVISION: self_supervised_loss,
DEEP_MASK_POINTLY_SUPERVISED: pointly_supervised_loss,
}
def _get_lab_image(self, preprocessed_image):
raw_image = self._feature_extractor.preprocess_reverse(
preprocessed_image)
raw_image = raw_image / 255.0
if tf_version.is_tf1():
raise NotImplementedError(('RGB-to-LAB conversion required for the color'
' consistency loss is not supported in TF1.'))
return tfio.experimental.color.rgb_to_lab(raw_image)
def _maybe_get_gt_batch(self, field):
"""Returns a batch of groundtruth tensors if available, else None."""
if self.groundtruth_has_field(field):
return _batch_gt_list(self.groundtruth_lists(field))
else:
return None
def _get_consistency_feature_map(self, prediction_dict):
prediction_shape = tf.shape(prediction_dict[MASK_LOGITS_GT_BOXES][0])
height, width = prediction_shape[2], prediction_shape[3]
consistency_type = self._deepmac_params.feature_consistency_type
if consistency_type == 'consistency_default_lab':
preprocessed_image = tf.image.resize(
prediction_dict['preprocessed_inputs'], (height, width))
consistency_feature_map = self._get_lab_image(preprocessed_image)
elif consistency_type == 'consistency_feature_map':
consistency_feature_map = prediction_dict['extracted_features'][-1]
consistency_feature_map = tf.image.resize(
consistency_feature_map, (height, width))
else:
raise ValueError('Unknown feature consistency type - {}.'.format(
self._deepmac_params.feature_consistency_type))
return tf.stop_gradient(consistency_feature_map)
def _compute_masks_loss(self, prediction_dict):
"""Computes the mask loss.
Args:
prediction_dict: dict from predict() method containing
INSTANCE_EMBEDDING and PIXEL_EMBEDDING prediction.
Both of these are lists of tensors, each of size
[batch_size, height, width, embedding_size].
Returns:
loss_dict: A dict mapping string (loss names) to scalar floats.
"""
allowed_masked_classes_ids = (
self._deepmac_params.allowed_masked_classes_ids)
loss_dict = {}
for loss_name in MASK_LOSSES:
loss_dict[loss_name] = 0.0
gt_boxes = self._maybe_get_gt_batch(fields.BoxListFields.boxes)
gt_weights = self._maybe_get_gt_batch(fields.BoxListFields.weights)
gt_classes = self._maybe_get_gt_batch(fields.BoxListFields.classes)
gt_masks = self._maybe_get_gt_batch(fields.BoxListFields.masks)
gt_keypoints = self._maybe_get_gt_batch(fields.BoxListFields.keypoints)
gt_depths = self._maybe_get_gt_batch(fields.BoxListFields.keypoint_depths)
mask_logits_list = prediction_dict[MASK_LOGITS_GT_BOXES]
self_supervised_mask_logits_list = prediction_dict.get(
SELF_SUPERVISED_DEAUGMENTED_MASK_LOGITS,
[None] * len(mask_logits_list))
assert len(mask_logits_list) == len(self_supervised_mask_logits_list)
consistency_feature_map = self._get_consistency_feature_map(prediction_dict)
    # Iterate over multiple predictions from the backbone (e.g. length 2 for
    # the hourglass backbone).
for (mask_logits, self_supervised_mask_logits) in zip(
mask_logits_list, self_supervised_mask_logits_list):
# TODO(vighneshb) Add sub-sampling back if required.
_, valid_mask_weights, gt_masks = filter_masked_classes(
allowed_masked_classes_ids, gt_classes,
gt_weights, gt_masks)
sample_loss_dict = self._compute_deepmac_losses(
boxes=gt_boxes, masks_logits=mask_logits, masks_gt=gt_masks,
classes=gt_classes, consistency_feature_map=consistency_feature_map,
self_supervised_masks_logits=self_supervised_mask_logits,
keypoints_gt=gt_keypoints, keypoints_depth_gt=gt_depths)
sample_loss_dict[DEEP_MASK_ESTIMATION] *= valid_mask_weights
for loss_name in WEAK_LOSSES:
sample_loss_dict[loss_name] *= gt_weights
num_instances = tf.maximum(tf.reduce_sum(gt_weights), 1.0)
num_instances_allowed = tf.maximum(
tf.reduce_sum(valid_mask_weights), 1.0)
loss_dict[DEEP_MASK_ESTIMATION] += (
tf.reduce_sum(sample_loss_dict[DEEP_MASK_ESTIMATION]) /
num_instances_allowed)
for loss_name in WEAK_LOSSES:
loss_dict[loss_name] += (tf.reduce_sum(sample_loss_dict[loss_name]) /
num_instances)
num_predictions = len(mask_logits_list)
return dict((key, loss / float(num_predictions))
for key, loss in loss_dict.items())
def loss(self, prediction_dict, true_image_shapes, scope=None):
losses_dict = super(DeepMACMetaArch, self).loss(
prediction_dict, true_image_shapes, scope)
if self._deepmac_params is not None:
mask_loss_dict = self._compute_masks_loss(
prediction_dict=prediction_dict)
for loss_name in MASK_LOSSES:
loss_weight = _get_loss_weight(loss_name, self._deepmac_params)
if loss_weight > 0.0:
losses_dict[LOSS_KEY_PREFIX + '/' + loss_name] = (
loss_weight * mask_loss_dict[loss_name])
return losses_dict
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Produces boxes given a prediction dict returned by predict().
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
**params: Currently ignored.
Returns:
detections: a dictionary containing the following fields
detection_masks: (Optional) A uint8 tensor of shape [batch,
max_detections, mask_height, mask_width] with masks for each
detection. Background is specified with 0, and foreground is specified
with positive integers (1 for standard instance segmentation mask, and
1-indexed parts for DensePose task).
And all other fields returned by the super class method.
"""
postprocess_dict = super(DeepMACMetaArch, self).postprocess(
prediction_dict, true_image_shapes, **params)
boxes_strided = postprocess_dict['detection_boxes_strided']
if self._deepmac_params is not None:
masks = self._postprocess_masks(
boxes_strided, prediction_dict[INSTANCE_EMBEDDING][-1],
prediction_dict[PIXEL_EMBEDDING][-1])
postprocess_dict[fields.DetectionResultFields.detection_masks] = masks
return postprocess_dict
def _postprocess_masks(self, boxes_output_stride,
instance_embedding, pixel_embedding):
"""Postprocess masks with the deep mask network.
Args:
boxes_output_stride: A [batch_size, num_instances, 4] float tensor
containing the batch of boxes in the absolute output space of the
feature extractor.
instance_embedding: A [batch_size, output_height, output_width,
embedding_size] float tensor containing instance embeddings.
pixel_embedding: A [batch_size, output_height, output_width,
pixel_embedding_size] float tensor containing the per-pixel embedding.
Returns:
masks: A float tensor of size [batch_size, num_instances, mask_size,
mask_size] containing binary per-box instance masks.
"""
height, width = (tf.shape(instance_embedding)[1],
tf.shape(instance_embedding)[2])
boxes = boxes_batch_absolute_to_normalized_coordinates(
boxes_output_stride, height, width)
mask_logits = self._predict_mask_logits_from_embeddings(
pixel_embedding, instance_embedding, boxes)
# TODO(vighneshb) Explore sweeping mask thresholds.
if self._deepmac_params.predict_full_resolution_masks:
height, width = tf.shape(mask_logits)[1], tf.shape(mask_logits)[2]
height *= self._stride
width *= self._stride
mask_logits = resize_instance_masks(mask_logits, (height, width))
mask_logits = crop_and_resize_instance_masks(
mask_logits, boxes, self._deepmac_params.postprocess_crop_size)
masks_prob = tf.nn.sigmoid(mask_logits)
return masks_prob
def _transform_boxes_to_feature_coordinates(self, provided_boxes,
true_image_shapes,
resized_image_shape,
instance_embedding):
"""Transforms normalzied boxes to feature map coordinates.
Args:
provided_boxes: A [batch, num_instances, 4] float tensor containing
normalized bounding boxes.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
resized_image_shape: A 4D int32 tensor containing shapes of the
preprocessed inputs (N, H, W, C).
instance_embedding: A [batch, output_height, output_width, embedding_size]
float tensor containing instance embeddings.
Returns:
A float tensor of size [batch, num_instances, 4] containing boxes whose
coordinates have been transformed to the absolute output space of the
feature extractor.
"""
# Input boxes must be normalized.
shape_utils.assert_box_normalized(provided_boxes)
# Transform the provided boxes to the absolute output space of the feature
# extractor.
height, width = (tf.shape(instance_embedding)[1],
tf.shape(instance_embedding)[2])
resized_image_height = resized_image_shape[1]
resized_image_width = resized_image_shape[2]
def transform_boxes(elems):
boxes_per_image, true_image_shape = elems
blist = box_list.BoxList(boxes_per_image)
# First transform boxes from image space to resized image space since
      # there may be padding in the resized images.
blist = box_list_ops.scale(blist,
true_image_shape[0] / resized_image_height,
true_image_shape[1] / resized_image_width)
# Then transform boxes from resized image space (normalized) to the
# feature map space (absolute).
blist = box_list_ops.to_absolute_coordinates(
blist, height, width, check_range=False)
return blist.get()
return tf.map_fn(
transform_boxes, [provided_boxes, true_image_shapes], dtype=tf.float32)
def predict_masks_from_boxes(self, prediction_dict, true_image_shapes,
provided_boxes, **params):
"""Produces masks for the provided boxes.
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
provided_boxes: float tensor of shape [batch, num_boxes, 4] containing
boxes coordinates (normalized) from which we will produce masks.
**params: Currently ignored.
Returns:
detections: a dictionary containing the following fields
detection_masks: (Optional) A uint8 tensor of shape [batch,
max_detections, mask_height, mask_width] with masks for each
detection. Background is specified with 0, and foreground is specified
with positive integers (1 for standard instance segmentation mask, and
1-indexed parts for DensePose task).
And all other fields returned by the super class method.
"""
postprocess_dict = super(DeepMACMetaArch,
self).postprocess(prediction_dict,
true_image_shapes, **params)
instance_embedding = prediction_dict[INSTANCE_EMBEDDING][-1]
resized_image_shapes = shape_utils.combined_static_and_dynamic_shape(
prediction_dict['preprocessed_inputs'])
boxes_strided = self._transform_boxes_to_feature_coordinates(
provided_boxes, true_image_shapes, resized_image_shapes,
instance_embedding)
if self._deepmac_params is not None:
masks = self._postprocess_masks(
boxes_strided, instance_embedding,
prediction_dict[PIXEL_EMBEDDING][-1])
postprocess_dict[fields.DetectionResultFields.detection_masks] = masks
return postprocess_dict
| 81,580 | 37.517941 | 101 | py |
models | models-master/research/object_detection/packages/tf2/setup.py | """Setup script for object_detection with TF2.0."""
import os
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
# Required for apache-beam with PY3
'avro-python3',
'apache-beam',
'pillow',
'lxml',
'matplotlib',
'Cython',
'contextlib2',
'tf-slim',
'six',
'pycocotools',
'lvis',
'scipy',
'pandas',
'tf-models-official>=2.5.1',
'tensorflow_io',
'keras',
'pyparsing==2.4.7', # TODO(b/204103388)
'sacrebleu<=2.2.0' # https://github.com/mjpost/sacrebleu/issues/209
]
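# Typical usage (per the TF2 Object Detection API installation instructions):
# copy this file into the `models/research` directory and run
# `python -m pip install .` from there, so that the `object_detection` and
# `slim` packages referenced below can be found.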
setup(
name='object_detection',
version='0.1',
install_requires=REQUIRED_PACKAGES,
include_package_data=True,
packages=(
[p for p in find_packages() if p.startswith('object_detection')] +
find_packages(where=os.path.join('.', 'slim'))),
package_dir={
'datasets': os.path.join('slim', 'datasets'),
'nets': os.path.join('slim', 'nets'),
'preprocessing': os.path.join('slim', 'preprocessing'),
'deployment': os.path.join('slim', 'deployment'),
'scripts': os.path.join('slim', 'scripts'),
},
description='Tensorflow Object Detection Library',
python_requires='>3.6',
)
| 1,236 | 25.891304 | 74 | py |
models | models-master/research/object_detection/utils/model_util.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for manipulating Keras models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def extract_submodel(model, inputs, outputs, name=None):
"""Extracts a section of a Keras model into a new model.
This method walks an existing model from the specified outputs back to the
specified inputs in order to construct a new model containing only a portion
of the old model, while sharing the layers and weights with the original
model.
WARNING: This method does not work for submodels containing layers that have
been used multiple times in the original model, or in other models beyond
the original model. (E.g. does not work for submodels that contain layers that
use shared weights). This also means that multiple overlapping submodels
cannot be extracted from the same model.
It also relies on recursion and will hit python's recursion limit for large
submodels.
Args:
model: The existing Keras model this method extracts a submodel from.
inputs: The layer inputs in the existing model that start the submodel
outputs: The layer outputs in the existing model that should be output by
the submodel
name: The name for the extracted model
Returns:
The extracted submodel specified by the given inputs and outputs
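  Example:
    Mirroring the accompanying unit test, for a functional model with layers
    a -> b -> c -> d, the call
      extract_submodel(model=model,
                       inputs=model.get_layer(name='b').input,
                       outputs=model.get_layer(name='d').output)
    returns a new tf.keras.Model covering layers b..d that shares its weights
    with the original model.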
"""
output_to_layer = {}
output_to_layer_input = {}
for layer in model.layers:
layer_output = layer.output
layer_inputs = layer.input
output_to_layer[layer_output.experimental_ref()] = layer
output_to_layer_input[layer_output.experimental_ref()] = layer_inputs
model_inputs_dict = {}
memoized_results = {}
# Relies on recursion, very low limit in python
def _recurse_in_model(tensor):
"""Walk the existing model recursively to copy a submodel."""
if tensor.experimental_ref() in memoized_results:
return memoized_results[tensor.experimental_ref()]
if (tensor.experimental_ref() == inputs.experimental_ref()) or (
isinstance(inputs, list) and tensor in inputs):
if tensor.experimental_ref() not in model_inputs_dict:
model_inputs_dict[tensor.experimental_ref()] = tf.keras.layers.Input(
tensor=tensor)
out = model_inputs_dict[tensor.experimental_ref()]
else:
cur_inputs = output_to_layer_input[tensor.experimental_ref()]
cur_layer = output_to_layer[tensor.experimental_ref()]
if isinstance(cur_inputs, list):
out = cur_layer([_recurse_in_model(inp) for inp in cur_inputs])
else:
out = cur_layer(_recurse_in_model(cur_inputs))
memoized_results[tensor.experimental_ref()] = out
return out
if isinstance(outputs, list):
model_outputs = [_recurse_in_model(tensor) for tensor in outputs]
else:
model_outputs = _recurse_in_model(outputs)
if isinstance(inputs, list):
model_inputs = [
model_inputs_dict[tensor.experimental_ref()] for tensor in inputs
]
else:
model_inputs = model_inputs_dict[inputs.experimental_ref()]
return tf.keras.Model(inputs=model_inputs, outputs=model_outputs, name=name)
| 3,848 | 38.680412 | 80 | py |
models | models-master/research/object_detection/utils/model_util_tf2_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utility functions for manipulating Keras models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import tensorflow.compat.v1 as tf
from object_detection.utils import model_util
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ExtractSubmodelUtilTest(tf.test.TestCase):
def test_simple_model(self):
inputs = tf.keras.Input(shape=(256,)) # Returns a placeholder tensor
# A layer instance is callable on a tensor, and returns a tensor.
x = tf.keras.layers.Dense(128, activation='relu', name='a')(inputs)
x = tf.keras.layers.Dense(64, activation='relu', name='b')(x)
x = tf.keras.layers.Dense(32, activation='relu', name='c')(x)
x = tf.keras.layers.Dense(16, activation='relu', name='d')(x)
x = tf.keras.layers.Dense(8, activation='relu', name='e')(x)
predictions = tf.keras.layers.Dense(10, activation='softmax')(x)
model = tf.keras.Model(inputs=inputs, outputs=predictions)
new_in = model.get_layer(
name='b').input
new_out = model.get_layer(
name='d').output
new_model = model_util.extract_submodel(
model=model,
inputs=new_in,
outputs=new_out)
batch_size = 3
ones = tf.ones((batch_size, 128))
final_out = new_model(ones)
self.assertAllEqual(final_out.shape, (batch_size, 16))
if __name__ == '__main__':
tf.test.main()
| 2,187 | 34.290323 | 80 | py |
models | models-master/research/object_detection/utils/bifpn_utils.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to manipulate feature map pyramids, such as for FPNs and BiFPNs.
Includes utility functions to facilitate feature pyramid map manipulations,
such as combining multiple feature maps, upsampling or downsampling feature
maps, and applying blocks of convolution, batchnorm, and activation layers.
"""
from six.moves import range
import tensorflow as tf
from object_detection.utils import ops
from object_detection.utils import shape_utils
def create_conv_block(name, num_filters, kernel_size, strides, padding,
use_separable, apply_batchnorm, apply_activation,
conv_hyperparams, is_training, freeze_batchnorm,
conv_bn_act_pattern=True):
"""Create Keras layers for regular or separable convolutions.
Args:
name: String. The name of the layer.
num_filters: Number of filters (channels) for the output feature maps.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
filters, or a single int if both values are the same.
strides: A list of length 2: [stride_height, stride_width], specifying the
convolution stride, or a single int if both strides are the same.
padding: One of 'VALID' or 'SAME'.
use_separable: Bool. Whether to use depthwise separable convolution instead
of regular convolution.
apply_batchnorm: Bool. Whether to apply a batch normalization layer after
convolution, constructed according to the conv_hyperparams.
apply_activation: Bool. Whether to apply an activation layer after
convolution, constructed according to the conv_hyperparams.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
conv_bn_act_pattern: Bool. By default, when True, the layers returned by
this function are in the order [conv, batchnorm, activation]. Otherwise,
when False, the order of the layers is [activation, conv, batchnorm].
Returns:
    A list of Keras layers, including (regular or separable) convolution, and
optionally batch normalization and activation layers.
"""
layers = []
if use_separable:
kwargs = conv_hyperparams.params()
# Both the regularizer and initializer apply to the depthwise layer,
# so we remap the kernel_* to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
# TODO(aom): Verify that the pointwise regularizer/initializer should be set
# here, since this is not the case in feature_map_generators.py
kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['pointwise_initializer'] = kwargs['kernel_initializer']
layers.append(
tf.keras.layers.SeparableConv2D(
filters=num_filters,
kernel_size=kernel_size,
depth_multiplier=1,
padding=padding,
strides=strides,
name=name + 'separable_conv',
**kwargs))
else:
layers.append(
tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=kernel_size,
padding=padding,
strides=strides,
name=name + 'conv',
**conv_hyperparams.params()))
if apply_batchnorm:
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=name + 'batchnorm'))
if apply_activation:
activation_layer = conv_hyperparams.build_activation_layer(
name=name + 'activation')
if conv_bn_act_pattern:
layers.append(activation_layer)
else:
layers = [activation_layer] + layers
return layers
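# Illustrative sketch (not part of the original module): the list returned by
# create_conv_block contains plain Keras layers, so callers are expected to
# apply them to a feature map in order. `feature_map` and `conv_block_layers`
# are hypothetical arguments supplied by the caller.
def _example_apply_conv_block(feature_map, conv_block_layers):
  """Applies the layers from create_conv_block in order."""
  output = feature_map
  for layer in conv_block_layers:
    output = layer(output)
  return output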
def create_downsample_feature_map_ops(scale, downsample_method,
conv_hyperparams, is_training,
freeze_batchnorm, name):
"""Creates Keras layers for downsampling feature maps.
Args:
scale: Int. The scale factor by which to downsample input feature maps. For
example, in the case of a typical feature map pyramid, the scale factor
between level_i and level_i+1 is 2.
downsample_method: String. The method used for downsampling. Currently
supported methods include 'max_pooling', 'avg_pooling', and
'depthwise_conv'.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will downsample input feature maps by the
desired scale factor.
"""
layers = []
padding = 'SAME'
stride = int(scale)
kernel_size = stride + 1
if downsample_method == 'max_pooling':
layers.append(
tf.keras.layers.MaxPooling2D(
pool_size=kernel_size,
strides=stride,
padding=padding,
name=name + 'downsample_max_x{}'.format(stride)))
elif downsample_method == 'avg_pooling':
layers.append(
tf.keras.layers.AveragePooling2D(
pool_size=kernel_size,
strides=stride,
padding=padding,
name=name + 'downsample_avg_x{}'.format(stride)))
elif downsample_method == 'depthwise_conv':
layers.append(
tf.keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=stride,
padding=padding,
name=name + 'downsample_depthwise_x{}'.format(stride)))
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=name + 'downsample_batchnorm'))
layers.append(
conv_hyperparams.build_activation_layer(name=name +
'downsample_activation'))
else:
raise ValueError('Unknown downsample method: {}'.format(downsample_method))
return layers
def create_upsample_feature_map_ops(scale, use_native_resize_op, name):
"""Creates Keras layers for upsampling feature maps.
Args:
scale: Int. The scale factor by which to upsample input feature maps. For
example, in the case of a typical feature map pyramid, the scale factor
between level_i and level_i-1 is 2.
use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for
      the upsampling process instead of the reshape-and-broadcast
      implementation.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will upsample input feature maps by the
desired scale factor.
"""
layers = []
if use_native_resize_op:
def resize_nearest_neighbor(image):
image_shape = shape_utils.combined_static_and_dynamic_shape(image)
return tf.compat.v1.image.resize_nearest_neighbor(
image, [image_shape[1] * scale, image_shape[2] * scale])
layers.append(
tf.keras.layers.Lambda(
resize_nearest_neighbor,
name=name + 'nearest_neighbor_upsampling_x{}'.format(scale)))
else:
def nearest_neighbor_upsampling(image):
return ops.nearest_neighbor_upsampling(image, scale=scale)
layers.append(
tf.keras.layers.Lambda(
nearest_neighbor_upsampling,
name=name + 'nearest_neighbor_upsampling_x{}'.format(scale)))
return layers
def create_resample_feature_map_ops(input_scale_factor, output_scale_factor,
downsample_method, use_native_resize_op,
conv_hyperparams, is_training,
freeze_batchnorm, name):
"""Creates Keras layers for downsampling or upsampling feature maps.
Args:
input_scale_factor: Int. Scale factor of the input feature map. For example,
for a feature pyramid where each successive level halves its spatial
resolution, the scale factor of a level is 2^level. The input and output
      scale factors are used to compute the scale for upsampling or
      downsampling, so they should be evenly divisible.
output_scale_factor: Int. Scale factor of the output feature map. See
input_scale_factor for additional details.
downsample_method: String. The method used for downsampling. See
create_downsample_feature_map_ops for details on supported methods.
use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for
      the upsampling process instead of the reshape-and-broadcast
      implementation.
See create_upsample_feature_map_ops for details.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will downsample or upsample input feature maps
to match the desired output feature map scale.
"""
if input_scale_factor < output_scale_factor:
if output_scale_factor % input_scale_factor != 0:
      raise ValueError(
          'Invalid scale factor: input scale 1/{} not divisible by output '
          'scale 1/{}'.format(input_scale_factor, output_scale_factor))
scale = output_scale_factor // input_scale_factor
return create_downsample_feature_map_ops(scale, downsample_method,
conv_hyperparams, is_training,
freeze_batchnorm, name)
elif input_scale_factor > output_scale_factor:
if input_scale_factor % output_scale_factor != 0:
      raise ValueError(
          'Invalid scale factor: input scale 1/{} not a divisor of output '
          'scale 1/{}'.format(input_scale_factor, output_scale_factor))
scale = input_scale_factor // output_scale_factor
return create_upsample_feature_map_ops(scale, use_native_resize_op, name)
else:
return []
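# Illustrative sketch (assumption, not part of the original module): resampling
# a level-3 feature map (scale 2**3) to level-5 resolution (scale 2**5) yields
# downsampling layers with scale factor 4, which the caller applies in order.
# `conv_hyperparams` is assumed to be a
# hyperparams_builder.KerasLayerHyperparams instance.
def _example_resample_level_3_to_5(feature_map, conv_hyperparams):
  """Downsamples a level-3 feature map to level-5 resolution."""
  layers = create_resample_feature_map_ops(
      input_scale_factor=2**3,
      output_scale_factor=2**5,
      downsample_method='max_pooling',
      use_native_resize_op=False,
      conv_hyperparams=conv_hyperparams,
      is_training=False,
      freeze_batchnorm=True,
      name='example_')
  for layer in layers:
    feature_map = layer(feature_map)
  return feature_map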
# TODO(aom): Add tests for this module in a followup CL.
class BiFPNCombineLayer(tf.keras.layers.Layer):
"""Combines multiple input feature maps into a single output feature map.
A Keras layer which combines multiple input feature maps into a single output
feature map, according to the desired combination method. Options for
combining feature maps include simple summation, or several types of weighted
sums using learned weights for each input feature map. These include
  'weighted_sum', 'attention', and 'fast_attention'. For more details, see the
  EfficientDet paper by Tan et al., arxiv.org/abs/1911.09070.
Specifically, this layer takes a list of tensors as input, all of the same
shape, and returns a single tensor, also of the same shape.
"""
def __init__(self, combine_method, **kwargs):
"""Constructor.
Args:
combine_method: String. The method used to combine the input feature maps
into a single output feature map. One of 'sum', 'weighted_sum',
'attention', or 'fast_attention'.
**kwargs: Additional Keras layer arguments.
"""
super(BiFPNCombineLayer, self).__init__(**kwargs)
self.combine_method = combine_method
def _combine_weighted_sum(self, inputs):
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), self.per_input_weights),
axis=[-1])
def _combine_attention(self, inputs):
normalized_weights = tf.nn.softmax(self.per_input_weights)
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), normalized_weights),
axis=[-1])
def _combine_fast_attention(self, inputs):
weights_non_neg = tf.nn.relu(self.per_input_weights)
normalizer = tf.reduce_sum(weights_non_neg) + 0.0001
normalized_weights = weights_non_neg / normalizer
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), normalized_weights),
axis=[-1])
def build(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('A BiFPN combine layer should be called '
'on a list of inputs.')
if len(input_shape) < 2:
raise ValueError('A BiFPN combine layer should be called '
'on a list of at least 2 inputs. '
'Got ' + str(len(input_shape)) + ' inputs.')
if self.combine_method == 'sum':
self._combine_op = tf.keras.layers.Add()
elif self.combine_method == 'weighted_sum':
self._combine_op = self._combine_weighted_sum
elif self.combine_method == 'attention':
self._combine_op = self._combine_attention
elif self.combine_method == 'fast_attention':
self._combine_op = self._combine_fast_attention
else:
raise ValueError('Unknown combine type: {}'.format(self.combine_method))
if self.combine_method in {'weighted_sum', 'attention', 'fast_attention'}:
self.per_input_weights = self.add_weight(
name='bifpn_combine_weights',
shape=(len(input_shape), 1),
initializer='ones',
trainable=True)
super(BiFPNCombineLayer, self).build(input_shape)
def call(self, inputs):
"""Combines multiple input feature maps into a single output feature map.
Executed when calling the `.__call__` method on input.
Args:
inputs: A list of tensors where all tensors have the same shape, [batch,
height_i, width_i, depth_i].
Returns:
A single tensor, with the same shape as the input tensors,
[batch, height_i, width_i, depth_i].
"""
return self._combine_op(inputs)
def compute_output_shape(self, input_shape):
output_shape = input_shape[0]
for i in range(1, len(input_shape)):
if input_shape[i] != output_shape:
raise ValueError(
'Inputs could not be combined. Shapes should match, '
'but input_shape[0] is {} while input_shape[{}] is {}'.format(
output_shape, i, input_shape[i]))
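# Illustrative sketch (not part of the original module): combining two feature
# maps of identical shape with the 'fast_attention' method. With that method,
# each learned per-input weight w_i is normalized as
# relu(w_i) / (sum_j relu(w_j) + 1e-4) before the weighted sum is taken.
def _example_fast_attention_combine(feature_map_a, feature_map_b):
  """Combines two same-shaped feature maps with fast attention weights."""
  combine_layer = BiFPNCombineLayer(
      combine_method='fast_attention', name='example_combine')
  return combine_layer([feature_map_a, feature_map_b])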
| 15,597 | 42.448468 | 80 | py |
models | models-master/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for generate_detection_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import generate_detection_data # pylint:disable=g-import-not-at-top
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
mock = unittest.mock
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class FakeModel(model.DetectionModel):
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=5)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs)
def predict(self, preprocessed_inputs, true_image_shapes):
return {'image': self._conv(preprocessed_inputs)}
def postprocess(self, prediction_dict, true_image_shapes):
with tf.control_dependencies(list(prediction_dict.values())):
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6],
[0.5, 0.5, 0.8, 0.8]]], tf.float32),
'detection_scores': tf.constant([[0.95, 0.6]], tf.float32),
'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2],
[0.3, 0.1, 0.6]]],
tf.float32),
'detection_classes': tf.constant([[0, 1]], tf.float32),
'num_detections': tf.constant([2], tf.float32)
}
return postprocessed_tensors
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
finally:
os.unlink(filename)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateDetectionDataTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path):
"""A function to save checkpoint from a fake Detection Model.
Args:
checkpoint_path: Path to save checkpoint from Fake model.
"""
mock_model = FakeModel()
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_path, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def _export_saved_model(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
tf.io.gfile.makedirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
return saved_model_path
def _create_tf_example(self):
with self.test_session():
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).numpy()
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def Int64Feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(b'image_id'),
'image/height': Int64Feature(4),
'image/width': Int64Feature(6),
'image/object/class/label': Int64Feature(5),
'image/object/class/text': BytesFeature(b'hyena'),
'image/class/label': Int64Feature(5),
'image/class/text': BytesFeature(b'hyena'),
}))
return example.SerializeToString()
def assert_expected_example(self, example):
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/class/score']
.float_list.value, [0.95])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'hyena'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value, [4])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value, [6])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'image_id'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_generate_detection_data_fn(self):
saved_model_path = self._export_saved_model()
confidence_threshold = 0.8
inference_fn = generate_detection_data.GenerateDetectionDataFn(
saved_model_path, confidence_threshold)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
output = inference_fn.process(generated_example)
output_example = output[0]
self.assertAllEqual(
output_example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(output_example.features.feature['image/width']
.int64_list.value, [6])
self.assert_expected_example(output_example)
def test_beam_pipeline(self):
with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
saved_model_path = self._export_saved_model()
confidence_threshold = 0.8
num_shards = 1
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
generate_detection_data.construct_pipeline(
p, input_tfrecord, output_tfrecord, saved_model_path,
confidence_threshold, num_shards)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| 9,871 | 36.823755 | 119 | py |
models | models-master/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for generate_embedding_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import generate_embedding_data # pylint:disable=g-import-not-at-top
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
mock = unittest.mock
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class FakeModel(model.DetectionModel):
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=5)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs)
def predict(self, preprocessed_inputs, true_image_shapes):
return {'image': self._conv(preprocessed_inputs)}
def postprocess(self, prediction_dict, true_image_shapes):
    with tf.control_dependencies(list(prediction_dict.values())):
num_features = 100
feature_dims = 10
classifier_feature = np.ones(
(2, feature_dims, feature_dims, num_features),
dtype=np.float32).tolist()
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6],
[0.5, 0.5, 0.8, 0.8]]], tf.float32),
'detection_scores': tf.constant([[0.95, 0.6]], tf.float32),
'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2],
[0.3, 0.1, 0.6]]],
tf.float32),
'detection_classes': tf.constant([[0, 1]], tf.float32),
'num_detections': tf.constant([2], tf.float32),
'detection_features':
tf.constant([classifier_feature],
tf.float32)
}
return postprocessed_tensors
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
finally:
os.unlink(temp.name)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateEmbeddingData(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path):
"""A function to save checkpoint from a fake Detection Model.
Args:
checkpoint_path: Path to save checkpoint from Fake model.
"""
mock_model = FakeModel()
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_path, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def _export_saved_model(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
tf.io.gfile.makedirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
return saved_model_path
def _create_tf_example(self):
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def Int64Feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def FloatFeature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(b'image_id'),
'image/height': Int64Feature(400),
'image/width': Int64Feature(600),
'image/class/label': Int64Feature(5),
'image/class/text': BytesFeature(b'hyena'),
'image/object/bbox/xmin': FloatFeature(0.1),
'image/object/bbox/xmax': FloatFeature(0.6),
'image/object/bbox/ymin': FloatFeature(0.0),
'image/object/bbox/ymax': FloatFeature(0.5),
'image/object/class/score': FloatFeature(0.95),
'image/object/class/label': Int64Feature(5),
'image/object/class/text': BytesFeature(b'hyena'),
'image/date_captured': BytesFeature(b'2019-10-20 12:12:12')
}))
return example.SerializeToString()
def assert_expected_example(self, example, topk=False, botk=False):
# Check embeddings
if topk or botk:
self.assertEqual(len(
example.features.feature['image/embedding'].float_list.value),
218)
self.assertAllEqual(
example.features.feature['image/embedding_count'].int64_list.value,
[2])
else:
self.assertEqual(len(
example.features.feature['image/embedding'].float_list.value),
109)
self.assertAllEqual(
example.features.feature['image/embedding_count'].int64_list.value,
[1])
self.assertAllEqual(
example.features.feature['image/embedding_length'].int64_list.value,
[109])
# Check annotations
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/class/score']
.float_list.value, [0.95])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'hyena'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value, [400])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value, [600])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'image_id'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_generate_embedding_data_fn(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 1
bottom_k_embedding_count = 0
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example)
def test_generate_embedding_data_with_top_k_boxes(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 2
bottom_k_embedding_count = 0
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/label'].int64_list.value, [5])
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/text'].bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example, topk=True)
def test_generate_embedding_data_with_bottom_k_boxes(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 0
bottom_k_embedding_count = 2
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/label'].int64_list.value, [5])
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/text'].bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example, botk=True)
def test_beam_pipeline(self):
with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
saved_model_path = self._export_saved_model()
top_k_embedding_count = 1
bottom_k_embedding_count = 0
num_shards = 1
embedding_type = 'final_box_features'
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
generate_embedding_data.construct_pipeline(
p, input_tfrecord, output_tfrecord, saved_model_path,
top_k_embedding_count, bottom_k_embedding_count, num_shards,
embedding_type)
p.run()
filenames = tf.io.gfile.glob(
output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| 13,157 | 38.63253 | 119 | py |
models | models-master/research/object_detection/builders/optimizer_builder_tf2_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizer_builder."""
import unittest
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class OptimizerBuilderV2Test(tf.test.TestCase):
"""Test building optimizers in V2 mode."""
def testBuildRMSPropOptimizer(self):
optimizer_text_proto = """
rms_prop_optimizer: {
learning_rate: {
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 800720
decay_factor: 0.95
}
}
momentum_optimizer_value: 0.9
decay: 0.9
epsilon: 1.0
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.RMSprop)
def testBuildMomentumOptimizer(self):
optimizer_text_proto = """
momentum_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.001
}
}
momentum_optimizer_value: 0.99
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.SGD)
def testBuildAdamOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.Adam)
def testBuildMovingAverageOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: True
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.Optimizer)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 3,378 | 31.180952 | 80 | py |
models | models-master/research/object_detection/builders/model_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build a DetectionModel from configuration."""
import functools
import sys
from absl import logging
from object_detection.builders import anchor_generator_builder
from object_detection.builders import box_coder_builder
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import losses_builder
from object_detection.builders import matcher_builder
from object_detection.builders import post_processing_builder
from object_detection.builders import region_similarity_calculator_builder as sim_calc
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import post_processing
from object_detection.core import target_assigner
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.meta_architectures import context_rcnn_meta_arch
from object_detection.meta_architectures import deepmac_meta_arch
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.meta_architectures import rfcn_meta_arch
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.predictors.heads import mask_head
from object_detection.protos import losses_pb2
from object_detection.protos import model_pb2
from object_detection.utils import label_map_util
from object_detection.utils import ops
from object_detection.utils import spatial_transform_ops as spatial_ops
from object_detection.utils import tf_version
## Feature Extractors for TF
## This section conditionally imports different feature extractors based on the
## Tensorflow version.
##
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from object_detection.models import center_net_hourglass_feature_extractor
from object_detection.models import center_net_mobilenet_v2_feature_extractor
from object_detection.models import center_net_mobilenet_v2_fpn_feature_extractor
from object_detection.models import center_net_resnet_feature_extractor
from object_detection.models import center_net_resnet_v1_fpn_feature_extractor
from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res_keras
from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_resnet_keras
from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as ssd_resnet_v1_fpn_keras
from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_resnet_fpn_keras
from object_detection.models.ssd_mobilenet_v1_fpn_keras_feature_extractor import SSDMobileNetV1FpnKerasFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_keras_feature_extractor import SSDMobileNetV1KerasFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_fpn_keras_feature_extractor import SSDMobileNetV2FpnKerasFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor
from object_detection.predictors import rfcn_keras_box_predictor
if sys.version_info[0] >= 3:
from object_detection.models import ssd_efficientnet_bifpn_feature_extractor as ssd_efficientnet_bifpn
if tf_version.is_tf1():
from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2
from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas
from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas
from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn
from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn
from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor
from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor
from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_mnasfpn_feature_extractor import SSDMobileNetV2MnasFPNFeatureExtractor
from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor
from object_detection.models.ssd_mobilenet_edgetpu_feature_extractor import SSDMobileNetEdgeTPUFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor
from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor
from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3LargeFeatureExtractor
from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3SmallFeatureExtractor
from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3SmallPrunedFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetCPUFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetDSPFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetEdgeTPUFeatureExtractor
from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetGPUFeatureExtractor
from object_detection.models.ssd_spaghettinet_feature_extractor import SSDSpaghettinetFeatureExtractor
from object_detection.models.ssd_pnasnet_feature_extractor import SSDPNASNetFeatureExtractor
from object_detection.predictors import rfcn_box_predictor
# pylint: enable=g-import-not-at-top
if tf_version.is_tf2():
SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = {
'ssd_mobilenet_v1_keras': SSDMobileNetV1KerasFeatureExtractor,
'ssd_mobilenet_v1_fpn_keras': SSDMobileNetV1FpnKerasFeatureExtractor,
'ssd_mobilenet_v2_keras': SSDMobileNetV2KerasFeatureExtractor,
'ssd_mobilenet_v2_fpn_keras': SSDMobileNetV2FpnKerasFeatureExtractor,
'ssd_resnet50_v1_fpn_keras':
ssd_resnet_v1_fpn_keras.SSDResNet50V1FpnKerasFeatureExtractor,
'ssd_resnet101_v1_fpn_keras':
ssd_resnet_v1_fpn_keras.SSDResNet101V1FpnKerasFeatureExtractor,
'ssd_resnet152_v1_fpn_keras':
ssd_resnet_v1_fpn_keras.SSDResNet152V1FpnKerasFeatureExtractor,
'ssd_efficientnet-b0_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB0BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b1_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB1BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b2_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB2BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b3_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB3BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b4_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB4BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b5_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB5BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b6_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB6BiFPNKerasFeatureExtractor,
'ssd_efficientnet-b7_bifpn_keras':
ssd_efficientnet_bifpn.SSDEfficientNetB7BiFPNKerasFeatureExtractor,
}
FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = {
'faster_rcnn_resnet50_keras':
frcnn_resnet_keras.FasterRCNNResnet50KerasFeatureExtractor,
'faster_rcnn_resnet101_keras':
frcnn_resnet_keras.FasterRCNNResnet101KerasFeatureExtractor,
'faster_rcnn_resnet152_keras':
frcnn_resnet_keras.FasterRCNNResnet152KerasFeatureExtractor,
'faster_rcnn_inception_resnet_v2_keras':
frcnn_inc_res_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor,
'faster_rcnn_resnet50_fpn_keras':
frcnn_resnet_fpn_keras.FasterRCNNResnet50FpnKerasFeatureExtractor,
'faster_rcnn_resnet101_fpn_keras':
frcnn_resnet_fpn_keras.FasterRCNNResnet101FpnKerasFeatureExtractor,
'faster_rcnn_resnet152_fpn_keras':
frcnn_resnet_fpn_keras.FasterRCNNResnet152FpnKerasFeatureExtractor,
}
CENTER_NET_EXTRACTOR_FUNCTION_MAP = {
'resnet_v2_50':
center_net_resnet_feature_extractor.resnet_v2_50,
'resnet_v2_101':
center_net_resnet_feature_extractor.resnet_v2_101,
'resnet_v1_18_fpn':
center_net_resnet_v1_fpn_feature_extractor.resnet_v1_18_fpn,
'resnet_v1_34_fpn':
center_net_resnet_v1_fpn_feature_extractor.resnet_v1_34_fpn,
'resnet_v1_50_fpn':
center_net_resnet_v1_fpn_feature_extractor.resnet_v1_50_fpn,
'resnet_v1_101_fpn':
center_net_resnet_v1_fpn_feature_extractor.resnet_v1_101_fpn,
'hourglass_10':
center_net_hourglass_feature_extractor.hourglass_10,
'hourglass_20':
center_net_hourglass_feature_extractor.hourglass_20,
'hourglass_32':
center_net_hourglass_feature_extractor.hourglass_32,
'hourglass_52':
center_net_hourglass_feature_extractor.hourglass_52,
'hourglass_104':
center_net_hourglass_feature_extractor.hourglass_104,
'mobilenet_v2':
center_net_mobilenet_v2_feature_extractor.mobilenet_v2,
'mobilenet_v2_fpn':
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn,
'mobilenet_v2_fpn_sep_conv':
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn,
}
FEATURE_EXTRACTOR_MAPS = [
CENTER_NET_EXTRACTOR_FUNCTION_MAP,
FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP,
SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
]
if tf_version.is_tf1():
SSD_FEATURE_EXTRACTOR_CLASS_MAP = {
'ssd_inception_v2':
SSDInceptionV2FeatureExtractor,
'ssd_inception_v3':
SSDInceptionV3FeatureExtractor,
'ssd_mobilenet_v1':
SSDMobileNetV1FeatureExtractor,
'ssd_mobilenet_v1_fpn':
SSDMobileNetV1FpnFeatureExtractor,
'ssd_mobilenet_v1_ppn':
SSDMobileNetV1PpnFeatureExtractor,
'ssd_mobilenet_v2':
SSDMobileNetV2FeatureExtractor,
'ssd_mobilenet_v2_fpn':
SSDMobileNetV2FpnFeatureExtractor,
'ssd_mobilenet_v2_mnasfpn':
SSDMobileNetV2MnasFPNFeatureExtractor,
'ssd_mobilenet_v3_large':
SSDMobileNetV3LargeFeatureExtractor,
'ssd_mobilenet_v3_small':
SSDMobileNetV3SmallFeatureExtractor,
'ssd_mobilenet_v3_small_pruned':
SSDMobileNetV3SmallPrunedFeatureExtractor,
'ssd_mobilenet_edgetpu':
SSDMobileNetEdgeTPUFeatureExtractor,
'ssd_resnet50_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor,
'ssd_resnet101_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor,
'ssd_resnet152_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor,
'ssd_resnet50_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor,
'ssd_resnet101_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor,
'ssd_resnet152_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor,
'embedded_ssd_mobilenet_v1':
EmbeddedSSDMobileNetV1FeatureExtractor,
'ssd_pnasnet':
SSDPNASNetFeatureExtractor,
'ssd_mobiledet_cpu':
SSDMobileDetCPUFeatureExtractor,
'ssd_mobiledet_dsp':
SSDMobileDetDSPFeatureExtractor,
'ssd_mobiledet_edgetpu':
SSDMobileDetEdgeTPUFeatureExtractor,
'ssd_mobiledet_gpu':
SSDMobileDetGPUFeatureExtractor,
'ssd_spaghettinet':
SSDSpaghettinetFeatureExtractor,
}
FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP = {
'faster_rcnn_nas':
frcnn_nas.FasterRCNNNASFeatureExtractor,
'faster_rcnn_pnas':
frcnn_pnas.FasterRCNNPNASFeatureExtractor,
'faster_rcnn_inception_resnet_v2':
frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor,
'faster_rcnn_inception_v2':
frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor,
'faster_rcnn_resnet50':
frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor,
'faster_rcnn_resnet101':
frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor,
'faster_rcnn_resnet152':
frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor,
}
CENTER_NET_EXTRACTOR_FUNCTION_MAP = {}
FEATURE_EXTRACTOR_MAPS = [
SSD_FEATURE_EXTRACTOR_CLASS_MAP,
FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP,
CENTER_NET_EXTRACTOR_FUNCTION_MAP
]
def _check_feature_extractor_exists(feature_extractor_type):
feature_extractors = set().union(*FEATURE_EXTRACTOR_MAPS)
if feature_extractor_type not in feature_extractors:
tf_version_str = '2' if tf_version.is_tf2() else '1'
raise ValueError(
'{} is not supported for tf version {}. See `model_builder.py` for '
        'feature extractors compatible with different versions of '
'Tensorflow'.format(feature_extractor_type, tf_version_str))
def _build_ssd_feature_extractor(feature_extractor_config,
is_training,
freeze_batchnorm,
reuse_weights=None):
"""Builds a ssd_meta_arch.SSDFeatureExtractor based on config.
Args:
feature_extractor_config: A SSDFeatureExtractor proto config from ssd.proto.
is_training: True if this feature extractor is being built for training.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
reuse_weights: if the feature extractor should reuse weights.
Returns:
ssd_meta_arch.SSDFeatureExtractor based on config.
Raises:
ValueError: On invalid feature extractor type.
"""
feature_type = feature_extractor_config.type
depth_multiplier = feature_extractor_config.depth_multiplier
min_depth = feature_extractor_config.min_depth
pad_to_multiple = feature_extractor_config.pad_to_multiple
use_explicit_padding = feature_extractor_config.use_explicit_padding
use_depthwise = feature_extractor_config.use_depthwise
is_keras = tf_version.is_tf2()
if is_keras:
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
feature_extractor_config.conv_hyperparams)
else:
conv_hyperparams = hyperparams_builder.build(
feature_extractor_config.conv_hyperparams, is_training)
override_base_feature_extractor_hyperparams = (
feature_extractor_config.override_base_feature_extractor_hyperparams)
if not is_keras and feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP:
raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type))
if is_keras:
feature_extractor_class = SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[
feature_type]
else:
feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type]
kwargs = {
'is_training':
is_training,
'depth_multiplier':
depth_multiplier,
'min_depth':
min_depth,
'pad_to_multiple':
pad_to_multiple,
'use_explicit_padding':
use_explicit_padding,
'use_depthwise':
use_depthwise,
'override_base_feature_extractor_hyperparams':
override_base_feature_extractor_hyperparams
}
if feature_extractor_config.HasField('replace_preprocessor_with_placeholder'):
kwargs.update({
'replace_preprocessor_with_placeholder':
feature_extractor_config.replace_preprocessor_with_placeholder
})
if feature_extractor_config.HasField('num_layers'):
kwargs.update({'num_layers': feature_extractor_config.num_layers})
if is_keras:
kwargs.update({
'conv_hyperparams': conv_hyperparams,
'inplace_batchnorm_update': False,
'freeze_batchnorm': freeze_batchnorm
})
else:
kwargs.update({
'conv_hyperparams_fn': conv_hyperparams,
'reuse_weights': reuse_weights,
})
if feature_extractor_config.HasField('spaghettinet_arch_name'):
kwargs.update({
'spaghettinet_arch_name':
feature_extractor_config.spaghettinet_arch_name,
})
if feature_extractor_config.HasField('fpn'):
kwargs.update({
'fpn_min_level':
feature_extractor_config.fpn.min_level,
'fpn_max_level':
feature_extractor_config.fpn.max_level,
'additional_layer_depth':
feature_extractor_config.fpn.additional_layer_depth,
})
if feature_extractor_config.HasField('bifpn'):
kwargs.update({
'bifpn_min_level':
feature_extractor_config.bifpn.min_level,
'bifpn_max_level':
feature_extractor_config.bifpn.max_level,
'bifpn_num_iterations':
feature_extractor_config.bifpn.num_iterations,
'bifpn_num_filters':
feature_extractor_config.bifpn.num_filters,
'bifpn_combine_method':
feature_extractor_config.bifpn.combine_method,
'use_native_resize_op':
feature_extractor_config.bifpn.use_native_resize_op,
})
return feature_extractor_class(**kwargs)
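# Illustrative sketch (assumption, not part of the original module): building
# an SSD feature extractor directly from a text-format SsdFeatureExtractor
# config. The message name `SsdFeatureExtractor` and the hyperparams fields
# below follow object_detection/protos/ssd.proto and hyperparams.proto; the
# exact config text is an example, not a canonical recipe.
def _example_build_ssd_feature_extractor_from_text(is_training=False):
  """Parses a minimal text config and builds the feature extractor."""
  # Local imports keep this sketch self-contained.
  from google.protobuf import text_format
  from object_detection.protos import ssd_pb2
  # 'ssd_mobilenet_v2_keras' assumes the TF2 feature extractor map above.
  config_text = """
    type: 'ssd_mobilenet_v2_keras'
    depth_multiplier: 1.0
    min_depth: 16
    conv_hyperparams {
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { truncated_normal_initializer { stddev: 0.03 } }
    }
  """
  feature_extractor_config = text_format.Parse(
      config_text, ssd_pb2.SsdFeatureExtractor())
  _check_feature_extractor_exists(feature_extractor_config.type)
  return _build_ssd_feature_extractor(
      feature_extractor_config=feature_extractor_config,
      is_training=is_training,
      freeze_batchnorm=False)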
def _build_ssd_model(ssd_config, is_training, add_summaries):
"""Builds an SSD detection model based on the model config.
Args:
ssd_config: A ssd.proto object containing the config for the desired
SSDMetaArch.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tf summaries in the model.
Returns:
SSDMetaArch based on the config.
Raises:
ValueError: If ssd_config.type is not recognized (i.e. not registered in
model_class_map).
"""
num_classes = ssd_config.num_classes
_check_feature_extractor_exists(ssd_config.feature_extractor.type)
# Feature extractor
feature_extractor = _build_ssd_feature_extractor(
feature_extractor_config=ssd_config.feature_extractor,
freeze_batchnorm=ssd_config.freeze_batchnorm,
is_training=is_training)
box_coder = box_coder_builder.build(ssd_config.box_coder)
matcher = matcher_builder.build(ssd_config.matcher)
region_similarity_calculator = sim_calc.build(
ssd_config.similarity_calculator)
encode_background_as_zeros = ssd_config.encode_background_as_zeros
negative_class_weight = ssd_config.negative_class_weight
anchor_generator = anchor_generator_builder.build(
ssd_config.anchor_generator)
if feature_extractor.is_keras_model:
ssd_box_predictor = box_predictor_builder.build_keras(
hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
freeze_batchnorm=ssd_config.freeze_batchnorm,
inplace_batchnorm_update=False,
num_predictions_per_location_list=anchor_generator
.num_anchors_per_location(),
box_predictor_config=ssd_config.box_predictor,
is_training=is_training,
num_classes=num_classes,
add_background_class=ssd_config.add_background_class)
else:
ssd_box_predictor = box_predictor_builder.build(
hyperparams_builder.build, ssd_config.box_predictor, is_training,
num_classes, ssd_config.add_background_class)
image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
ssd_config.post_processing)
(classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, random_example_sampler,
expected_loss_weights_fn) = losses_builder.build(ssd_config.loss)
normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize
equalization_loss_config = ops.EqualizationLossConfig(
weight=ssd_config.loss.equalization_loss.weight,
exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes)
target_assigner_instance = target_assigner.TargetAssigner(
region_similarity_calculator,
matcher,
box_coder,
negative_class_weight=negative_class_weight)
ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch
kwargs = {}
return ssd_meta_arch_fn(
is_training=is_training,
anchor_generator=anchor_generator,
box_predictor=ssd_box_predictor,
box_coder=box_coder,
feature_extractor=feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=score_conversion_fn,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_weight,
localization_loss_weight=localization_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=hard_example_miner,
target_assigner_instance=target_assigner_instance,
add_summaries=add_summaries,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
freeze_batchnorm=ssd_config.freeze_batchnorm,
inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
add_background_class=ssd_config.add_background_class,
explicit_background_class=ssd_config.explicit_background_class,
random_example_sampler=random_example_sampler,
expected_loss_weights_fn=expected_loss_weights_fn,
use_confidences_as_targets=ssd_config.use_confidences_as_targets,
implicit_example_weight=ssd_config.implicit_example_weight,
equalization_loss_config=equalization_loss_config,
return_raw_detections_during_predict=(
ssd_config.return_raw_detections_during_predict),
**kwargs)
def _build_faster_rcnn_feature_extractor(
feature_extractor_config, is_training, reuse_weights=True,
inplace_batchnorm_update=False):
"""Builds a faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config.
Args:
feature_extractor_config: A FasterRcnnFeatureExtractor proto config from
faster_rcnn.proto.
is_training: True if this feature extractor is being built for training.
reuse_weights: if the feature extractor should reuse weights.
inplace_batchnorm_update: Whether to update batch_norm inplace during
training. This is required for batch norm to work correctly on TPUs. When
      this is false, the user must add a control dependency on
      tf.GraphKeys.UPDATE_OPS to the train/loss op in order to update the
      batch norm moving average parameters.
Returns:
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config.
Raises:
ValueError: On invalid feature extractor type.
"""
if inplace_batchnorm_update:
raise ValueError('inplace batchnorm updates not supported.')
feature_type = feature_extractor_config.type
first_stage_features_stride = (
feature_extractor_config.first_stage_features_stride)
batch_norm_trainable = feature_extractor_config.batch_norm_trainable
if feature_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP:
raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format(
feature_type))
feature_extractor_class = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[
feature_type]
return feature_extractor_class(
is_training, first_stage_features_stride,
batch_norm_trainable, reuse_weights=reuse_weights)
def _build_faster_rcnn_keras_feature_extractor(
feature_extractor_config, is_training,
inplace_batchnorm_update=False):
"""Builds a faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor from config.
Args:
feature_extractor_config: A FasterRcnnFeatureExtractor proto config from
faster_rcnn.proto.
is_training: True if this feature extractor is being built for training.
inplace_batchnorm_update: Whether to update batch_norm inplace during
training. This is required for batch norm to work correctly on TPUs. When
      this is false, the user must add a control dependency on
      tf.GraphKeys.UPDATE_OPS for the train/loss op in order to update the
      batch norm moving average parameters.
Returns:
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor based on config.
Raises:
ValueError: On invalid feature extractor type.
"""
if inplace_batchnorm_update:
raise ValueError('inplace batchnorm updates not supported.')
feature_type = feature_extractor_config.type
first_stage_features_stride = (
feature_extractor_config.first_stage_features_stride)
batch_norm_trainable = feature_extractor_config.batch_norm_trainable
if feature_type not in FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP:
raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format(
feature_type))
feature_extractor_class = FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[
feature_type]
kwargs = {}
if feature_extractor_config.HasField('conv_hyperparams'):
kwargs.update({
'conv_hyperparams':
hyperparams_builder.KerasLayerHyperparams(
feature_extractor_config.conv_hyperparams),
'override_base_feature_extractor_hyperparams':
feature_extractor_config.override_base_feature_extractor_hyperparams
})
if feature_extractor_config.HasField('fpn'):
kwargs.update({
'fpn_min_level':
feature_extractor_config.fpn.min_level,
'fpn_max_level':
feature_extractor_config.fpn.max_level,
'additional_layer_depth':
feature_extractor_config.fpn.additional_layer_depth,
})
return feature_extractor_class(
is_training, first_stage_features_stride,
batch_norm_trainable, **kwargs)
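# Illustrative usage sketch, not part of the upstream module: under a TF2
# build, the feature extractor `type` must be a key registered in
# FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP; 'faster_rcnn_resnet101_keras'
# is an assumed key and should be verified against the registered map.
def _example_build_frcnn_keras_feature_extractor(is_training=True):
  """Builds a Keras Faster R-CNN feature extractor from a minimal config."""
  fe_config = model_pb2.DetectionModel().faster_rcnn.feature_extractor
  fe_config.type = 'faster_rcnn_resnet101_keras'  # assumed registered key
  fe_config.first_stage_features_stride = 16
  return _build_faster_rcnn_keras_feature_extractor(fe_config, is_training)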
def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries):
"""Builds a Faster R-CNN or R-FCN detection model based on the model config.
Builds R-FCN model if the second_stage_box_predictor in the config is of type
`rfcn_box_predictor` else builds a Faster R-CNN model.
Args:
frcnn_config: A faster_rcnn.proto object containing the config for the
desired FasterRCNNMetaArch or RFCNMetaArch.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tf summaries in the model.
Returns:
FasterRCNNMetaArch based on the config.
Raises:
ValueError: If frcnn_config.type is not recognized (i.e. not registered in
model_class_map).
"""
num_classes = frcnn_config.num_classes
image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)
_check_feature_extractor_exists(frcnn_config.feature_extractor.type)
is_keras = tf_version.is_tf2()
if is_keras:
feature_extractor = _build_faster_rcnn_keras_feature_extractor(
frcnn_config.feature_extractor, is_training,
inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)
else:
feature_extractor = _build_faster_rcnn_feature_extractor(
frcnn_config.feature_extractor, is_training,
inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)
number_of_stages = frcnn_config.number_of_stages
first_stage_anchor_generator = anchor_generator_builder.build(
frcnn_config.first_stage_anchor_generator)
first_stage_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN',
'proposal',
use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
if is_keras:
first_stage_box_predictor_arg_scope_fn = (
hyperparams_builder.KerasLayerHyperparams(
frcnn_config.first_stage_box_predictor_conv_hyperparams))
else:
first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build(
frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
first_stage_box_predictor_kernel_size = (
frcnn_config.first_stage_box_predictor_kernel_size)
first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
use_static_shapes = frcnn_config.use_static_shapes and (
frcnn_config.use_static_shapes_for_eval or is_training)
first_stage_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=frcnn_config.first_stage_positive_balance_fraction,
is_static=(frcnn_config.use_static_balanced_label_sampler and
use_static_shapes))
first_stage_max_proposals = frcnn_config.first_stage_max_proposals
if (frcnn_config.first_stage_nms_iou_threshold < 0 or
frcnn_config.first_stage_nms_iou_threshold > 1.0):
raise ValueError('iou_threshold not in [0, 1.0].')
if (is_training and frcnn_config.second_stage_batch_size >
first_stage_max_proposals):
raise ValueError('second_stage_batch_size should be no greater than '
'first_stage_max_proposals.')
first_stage_non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=frcnn_config.first_stage_nms_score_threshold,
iou_thresh=frcnn_config.first_stage_nms_iou_threshold,
max_size_per_class=frcnn_config.first_stage_max_proposals,
max_total_size=frcnn_config.first_stage_max_proposals,
use_static_shapes=use_static_shapes,
use_partitioned_nms=frcnn_config.use_partitioned_nms_in_first_stage,
use_combined_nms=frcnn_config.use_combined_nms_in_first_stage)
first_stage_loc_loss_weight = (
frcnn_config.first_stage_localization_loss_weight)
first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight
initial_crop_size = frcnn_config.initial_crop_size
maxpool_kernel_size = frcnn_config.maxpool_kernel_size
maxpool_stride = frcnn_config.maxpool_stride
second_stage_target_assigner = target_assigner.create_target_assigner(
'FasterRCNN',
'detection',
use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
if is_keras:
second_stage_box_predictor = box_predictor_builder.build_keras(
hyperparams_builder.KerasLayerHyperparams,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[1],
box_predictor_config=frcnn_config.second_stage_box_predictor,
is_training=is_training,
num_classes=num_classes)
else:
second_stage_box_predictor = box_predictor_builder.build(
hyperparams_builder.build,
frcnn_config.second_stage_box_predictor,
is_training=is_training,
num_classes=num_classes)
second_stage_batch_size = frcnn_config.second_stage_batch_size
second_stage_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=frcnn_config.second_stage_balance_fraction,
is_static=(frcnn_config.use_static_balanced_label_sampler and
use_static_shapes))
(second_stage_non_max_suppression_fn, second_stage_score_conversion_fn
) = post_processing_builder.build(frcnn_config.second_stage_post_processing)
second_stage_localization_loss_weight = (
frcnn_config.second_stage_localization_loss_weight)
second_stage_classification_loss = (
losses_builder.build_faster_rcnn_classification_loss(
frcnn_config.second_stage_classification_loss))
second_stage_classification_loss_weight = (
frcnn_config.second_stage_classification_loss_weight)
second_stage_mask_prediction_loss_weight = (
frcnn_config.second_stage_mask_prediction_loss_weight)
hard_example_miner = None
if frcnn_config.HasField('hard_example_miner'):
hard_example_miner = losses_builder.build_hard_example_miner(
frcnn_config.hard_example_miner,
second_stage_classification_loss_weight,
second_stage_localization_loss_weight)
crop_and_resize_fn = (
spatial_ops.multilevel_matmul_crop_and_resize
if frcnn_config.use_matmul_crop_and_resize
else spatial_ops.multilevel_native_crop_and_resize)
clip_anchors_to_image = (
frcnn_config.clip_anchors_to_image)
common_kwargs = {
'is_training':
is_training,
'num_classes':
num_classes,
'image_resizer_fn':
image_resizer_fn,
'feature_extractor':
feature_extractor,
'number_of_stages':
number_of_stages,
'first_stage_anchor_generator':
first_stage_anchor_generator,
'first_stage_target_assigner':
first_stage_target_assigner,
'first_stage_atrous_rate':
first_stage_atrous_rate,
'first_stage_box_predictor_arg_scope_fn':
first_stage_box_predictor_arg_scope_fn,
'first_stage_box_predictor_kernel_size':
first_stage_box_predictor_kernel_size,
'first_stage_box_predictor_depth':
first_stage_box_predictor_depth,
'first_stage_minibatch_size':
first_stage_minibatch_size,
'first_stage_sampler':
first_stage_sampler,
'first_stage_non_max_suppression_fn':
first_stage_non_max_suppression_fn,
'first_stage_max_proposals':
first_stage_max_proposals,
'first_stage_localization_loss_weight':
first_stage_loc_loss_weight,
'first_stage_objectness_loss_weight':
first_stage_obj_loss_weight,
'second_stage_target_assigner':
second_stage_target_assigner,
'second_stage_batch_size':
second_stage_batch_size,
'second_stage_sampler':
second_stage_sampler,
'second_stage_non_max_suppression_fn':
second_stage_non_max_suppression_fn,
'second_stage_score_conversion_fn':
second_stage_score_conversion_fn,
'second_stage_localization_loss_weight':
second_stage_localization_loss_weight,
'second_stage_classification_loss':
second_stage_classification_loss,
'second_stage_classification_loss_weight':
second_stage_classification_loss_weight,
'hard_example_miner':
hard_example_miner,
'add_summaries':
add_summaries,
'crop_and_resize_fn':
crop_and_resize_fn,
'clip_anchors_to_image':
clip_anchors_to_image,
'use_static_shapes':
use_static_shapes,
'resize_masks':
frcnn_config.resize_masks,
'return_raw_detections_during_predict':
frcnn_config.return_raw_detections_during_predict,
'output_final_box_features':
frcnn_config.output_final_box_features,
'output_final_box_rpn_features':
frcnn_config.output_final_box_rpn_features,
}
if ((not is_keras and isinstance(second_stage_box_predictor,
rfcn_box_predictor.RfcnBoxPredictor)) or
(is_keras and
isinstance(second_stage_box_predictor,
rfcn_keras_box_predictor.RfcnKerasBoxPredictor))):
return rfcn_meta_arch.RFCNMetaArch(
second_stage_rfcn_box_predictor=second_stage_box_predictor,
**common_kwargs)
elif frcnn_config.HasField('context_config'):
context_config = frcnn_config.context_config
common_kwargs.update({
'attention_bottleneck_dimension':
context_config.attention_bottleneck_dimension,
'attention_temperature':
context_config.attention_temperature,
'use_self_attention':
context_config.use_self_attention,
'use_long_term_attention':
context_config.use_long_term_attention,
'self_attention_in_sequence':
context_config.self_attention_in_sequence,
'num_attention_heads':
context_config.num_attention_heads,
'num_attention_layers':
context_config.num_attention_layers,
'attention_position':
context_config.attention_position
})
return context_rcnn_meta_arch.ContextRCNNMetaArch(
initial_crop_size=initial_crop_size,
maxpool_kernel_size=maxpool_kernel_size,
maxpool_stride=maxpool_stride,
second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
second_stage_mask_prediction_loss_weight=(
second_stage_mask_prediction_loss_weight),
**common_kwargs)
else:
return faster_rcnn_meta_arch.FasterRCNNMetaArch(
initial_crop_size=initial_crop_size,
maxpool_kernel_size=maxpool_kernel_size,
maxpool_stride=maxpool_stride,
second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
second_stage_mask_prediction_loss_weight=(
second_stage_mask_prediction_loss_weight),
**common_kwargs)
EXPERIMENTAL_META_ARCH_BUILDER_MAP = {
}
def _build_experimental_model(config, is_training, add_summaries=True):
return EXPERIMENTAL_META_ARCH_BUILDER_MAP[config.name](
is_training, add_summaries)
# The class ID in the groundtruth/model architecture is usually 0-based while
# the ID in the label map is 1-based. The offset is used to convert between
# the two.
CLASS_ID_OFFSET = 1
KEYPOINT_STD_DEV_DEFAULT = 1.0
def keypoint_proto_to_params(kp_config, keypoint_map_dict):
"""Converts CenterNet.KeypointEstimation proto to parameter namedtuple."""
label_map_item = keypoint_map_dict[kp_config.keypoint_class_name]
classification_loss, localization_loss, _, _, _, _, _ = (
losses_builder.build(kp_config.loss))
keypoint_indices = [
keypoint.id for keypoint in label_map_item.keypoints
]
keypoint_labels = [
keypoint.label for keypoint in label_map_item.keypoints
]
keypoint_std_dev_dict = {
label: KEYPOINT_STD_DEV_DEFAULT for label in keypoint_labels
}
if kp_config.keypoint_label_to_std:
for label, value in kp_config.keypoint_label_to_std.items():
keypoint_std_dev_dict[label] = value
keypoint_std_dev = [keypoint_std_dev_dict[label] for label in keypoint_labels]
if kp_config.HasField('heatmap_head_params'):
heatmap_head_num_filters = list(kp_config.heatmap_head_params.num_filters)
heatmap_head_kernel_sizes = list(kp_config.heatmap_head_params.kernel_sizes)
else:
heatmap_head_num_filters = [256]
heatmap_head_kernel_sizes = [3]
if kp_config.HasField('offset_head_params'):
offset_head_num_filters = list(kp_config.offset_head_params.num_filters)
offset_head_kernel_sizes = list(kp_config.offset_head_params.kernel_sizes)
else:
offset_head_num_filters = [256]
offset_head_kernel_sizes = [3]
if kp_config.HasField('regress_head_params'):
regress_head_num_filters = list(kp_config.regress_head_params.num_filters)
regress_head_kernel_sizes = list(
kp_config.regress_head_params.kernel_sizes)
else:
regress_head_num_filters = [256]
regress_head_kernel_sizes = [3]
return center_net_meta_arch.KeypointEstimationParams(
task_name=kp_config.task_name,
class_id=label_map_item.id - CLASS_ID_OFFSET,
keypoint_indices=keypoint_indices,
classification_loss=classification_loss,
localization_loss=localization_loss,
keypoint_labels=keypoint_labels,
keypoint_std_dev=keypoint_std_dev,
task_loss_weight=kp_config.task_loss_weight,
keypoint_regression_loss_weight=kp_config.keypoint_regression_loss_weight,
keypoint_heatmap_loss_weight=kp_config.keypoint_heatmap_loss_weight,
keypoint_offset_loss_weight=kp_config.keypoint_offset_loss_weight,
heatmap_bias_init=kp_config.heatmap_bias_init,
keypoint_candidate_score_threshold=(
kp_config.keypoint_candidate_score_threshold),
num_candidates_per_keypoint=kp_config.num_candidates_per_keypoint,
peak_max_pool_kernel_size=kp_config.peak_max_pool_kernel_size,
unmatched_keypoint_score=kp_config.unmatched_keypoint_score,
box_scale=kp_config.box_scale,
candidate_search_scale=kp_config.candidate_search_scale,
candidate_ranking_mode=kp_config.candidate_ranking_mode,
offset_peak_radius=kp_config.offset_peak_radius,
per_keypoint_offset=kp_config.per_keypoint_offset,
predict_depth=kp_config.predict_depth,
per_keypoint_depth=kp_config.per_keypoint_depth,
keypoint_depth_loss_weight=kp_config.keypoint_depth_loss_weight,
score_distance_offset=kp_config.score_distance_offset,
clip_out_of_frame_keypoints=kp_config.clip_out_of_frame_keypoints,
rescore_instances=kp_config.rescore_instances,
heatmap_head_num_filters=heatmap_head_num_filters,
heatmap_head_kernel_sizes=heatmap_head_kernel_sizes,
offset_head_num_filters=offset_head_num_filters,
offset_head_kernel_sizes=offset_head_kernel_sizes,
regress_head_num_filters=regress_head_num_filters,
regress_head_kernel_sizes=regress_head_kernel_sizes,
score_distance_multiplier=kp_config.score_distance_multiplier,
std_dev_multiplier=kp_config.std_dev_multiplier,
rescoring_threshold=kp_config.rescoring_threshold,
gaussian_denom_ratio=kp_config.gaussian_denom_ratio,
argmax_postprocessing=kp_config.argmax_postprocessing)
def object_detection_proto_to_params(od_config):
"""Converts CenterNet.ObjectDetection proto to parameter namedtuple."""
loss = losses_pb2.Loss()
  # Add a dummy classification loss to avoid errors in the loss builder.
# TODO(yuhuic): update the loss builder to take the classification loss
# directly.
loss.classification_loss.weighted_sigmoid.CopyFrom(
losses_pb2.WeightedSigmoidClassificationLoss())
loss.localization_loss.CopyFrom(od_config.localization_loss)
_, localization_loss, _, _, _, _, _ = (losses_builder.build(loss))
if od_config.HasField('scale_head_params'):
scale_head_num_filters = list(od_config.scale_head_params.num_filters)
scale_head_kernel_sizes = list(od_config.scale_head_params.kernel_sizes)
else:
scale_head_num_filters = [256]
scale_head_kernel_sizes = [3]
if od_config.HasField('offset_head_params'):
offset_head_num_filters = list(od_config.offset_head_params.num_filters)
offset_head_kernel_sizes = list(od_config.offset_head_params.kernel_sizes)
else:
offset_head_num_filters = [256]
offset_head_kernel_sizes = [3]
return center_net_meta_arch.ObjectDetectionParams(
localization_loss=localization_loss,
scale_loss_weight=od_config.scale_loss_weight,
offset_loss_weight=od_config.offset_loss_weight,
task_loss_weight=od_config.task_loss_weight,
scale_head_num_filters=scale_head_num_filters,
scale_head_kernel_sizes=scale_head_kernel_sizes,
offset_head_num_filters=offset_head_num_filters,
offset_head_kernel_sizes=offset_head_kernel_sizes)
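# Illustrative usage sketch, not part of the upstream module: a minimal
# CenterNet.ObjectDetection config assembled through model_pb2, mirroring the
# weighted-L2 pattern used by the converters in this file; the loss weights
# below are placeholder values.
def _example_object_detection_params():
  """Builds ObjectDetectionParams from a minimal config (sketch)."""
  od_config = model_pb2.DetectionModel().center_net.object_detection_task
  od_config.localization_loss.weighted_l2.CopyFrom(
      losses_pb2.WeightedL2LocalizationLoss())
  od_config.scale_loss_weight = 0.1
  od_config.offset_loss_weight = 1.0
  od_config.task_loss_weight = 1.0
  return object_detection_proto_to_params(od_config)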
def object_center_proto_to_params(oc_config):
"""Converts CenterNet.ObjectCenter proto to parameter namedtuple."""
loss = losses_pb2.Loss()
  # Add a dummy localization loss to avoid errors in the loss builder.
# TODO(yuhuic): update the loss builder to take the localization loss
# directly.
loss.localization_loss.weighted_l2.CopyFrom(
losses_pb2.WeightedL2LocalizationLoss())
loss.classification_loss.CopyFrom(oc_config.classification_loss)
classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss))
keypoint_weights_for_center = []
if oc_config.keypoint_weights_for_center:
keypoint_weights_for_center = list(oc_config.keypoint_weights_for_center)
if oc_config.HasField('center_head_params'):
center_head_num_filters = list(oc_config.center_head_params.num_filters)
center_head_kernel_sizes = list(oc_config.center_head_params.kernel_sizes)
else:
center_head_num_filters = [256]
center_head_kernel_sizes = [3]
return center_net_meta_arch.ObjectCenterParams(
classification_loss=classification_loss,
object_center_loss_weight=oc_config.object_center_loss_weight,
heatmap_bias_init=oc_config.heatmap_bias_init,
min_box_overlap_iou=oc_config.min_box_overlap_iou,
max_box_predictions=oc_config.max_box_predictions,
use_labeled_classes=oc_config.use_labeled_classes,
keypoint_weights_for_center=keypoint_weights_for_center,
center_head_num_filters=center_head_num_filters,
center_head_kernel_sizes=center_head_kernel_sizes,
peak_max_pool_kernel_size=oc_config.peak_max_pool_kernel_size)
def mask_proto_to_params(mask_config):
"""Converts CenterNet.MaskEstimation proto to parameter namedtuple."""
loss = losses_pb2.Loss()
  # Add a dummy localization loss to avoid errors in the loss builder.
loss.localization_loss.weighted_l2.CopyFrom(
losses_pb2.WeightedL2LocalizationLoss())
loss.classification_loss.CopyFrom(mask_config.classification_loss)
classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss))
if mask_config.HasField('mask_head_params'):
mask_head_num_filters = list(mask_config.mask_head_params.num_filters)
mask_head_kernel_sizes = list(mask_config.mask_head_params.kernel_sizes)
else:
mask_head_num_filters = [256]
mask_head_kernel_sizes = [3]
return center_net_meta_arch.MaskParams(
classification_loss=classification_loss,
task_loss_weight=mask_config.task_loss_weight,
mask_height=mask_config.mask_height,
mask_width=mask_config.mask_width,
score_threshold=mask_config.score_threshold,
heatmap_bias_init=mask_config.heatmap_bias_init,
mask_head_num_filters=mask_head_num_filters,
mask_head_kernel_sizes=mask_head_kernel_sizes)
def densepose_proto_to_params(densepose_config):
"""Converts CenterNet.DensePoseEstimation proto to parameter namedtuple."""
classification_loss, localization_loss, _, _, _, _, _ = (
losses_builder.build(densepose_config.loss))
return center_net_meta_arch.DensePoseParams(
class_id=densepose_config.class_id,
classification_loss=classification_loss,
localization_loss=localization_loss,
part_loss_weight=densepose_config.part_loss_weight,
coordinate_loss_weight=densepose_config.coordinate_loss_weight,
num_parts=densepose_config.num_parts,
task_loss_weight=densepose_config.task_loss_weight,
upsample_to_input_res=densepose_config.upsample_to_input_res,
heatmap_bias_init=densepose_config.heatmap_bias_init)
def tracking_proto_to_params(tracking_config):
"""Converts CenterNet.TrackEstimation proto to parameter namedtuple."""
loss = losses_pb2.Loss()
  # Add a dummy localization loss to avoid errors in the loss builder.
# TODO(yuhuic): update the loss builder to take the localization loss
# directly.
loss.localization_loss.weighted_l2.CopyFrom(
losses_pb2.WeightedL2LocalizationLoss())
loss.classification_loss.CopyFrom(tracking_config.classification_loss)
classification_loss, _, _, _, _, _, _ = losses_builder.build(loss)
return center_net_meta_arch.TrackParams(
num_track_ids=tracking_config.num_track_ids,
reid_embed_size=tracking_config.reid_embed_size,
classification_loss=classification_loss,
num_fc_layers=tracking_config.num_fc_layers,
task_loss_weight=tracking_config.task_loss_weight)
def temporal_offset_proto_to_params(temporal_offset_config):
"""Converts CenterNet.TemporalOffsetEstimation proto to param-tuple."""
loss = losses_pb2.Loss()
  # Add a dummy classification loss to avoid errors in the loss builder.
# TODO(yuhuic): update the loss builder to take the classification loss
# directly.
loss.classification_loss.weighted_sigmoid.CopyFrom(
losses_pb2.WeightedSigmoidClassificationLoss())
loss.localization_loss.CopyFrom(temporal_offset_config.localization_loss)
_, localization_loss, _, _, _, _, _ = losses_builder.build(loss)
return center_net_meta_arch.TemporalOffsetParams(
localization_loss=localization_loss,
task_loss_weight=temporal_offset_config.task_loss_weight)
def _build_center_net_model(center_net_config, is_training, add_summaries):
"""Build a CenterNet detection model.
Args:
center_net_config: A CenterNet proto object with model configuration.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tf summaries in the model.
Returns:
CenterNetMetaArch based on the config.
"""
image_resizer_fn = image_resizer_builder.build(
center_net_config.image_resizer)
_check_feature_extractor_exists(center_net_config.feature_extractor.type)
feature_extractor = _build_center_net_feature_extractor(
center_net_config.feature_extractor, is_training)
object_center_params = object_center_proto_to_params(
center_net_config.object_center_params)
object_detection_params = None
if center_net_config.HasField('object_detection_task'):
object_detection_params = object_detection_proto_to_params(
center_net_config.object_detection_task)
if center_net_config.HasField('deepmac_mask_estimation'):
    logging.warning('Building experimental DeepMAC meta-arch. '
                    'Some features may be omitted.')
deepmac_params = deepmac_meta_arch.deepmac_proto_to_params(
center_net_config.deepmac_mask_estimation)
return deepmac_meta_arch.DeepMACMetaArch(
is_training=is_training,
add_summaries=add_summaries,
num_classes=center_net_config.num_classes,
feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=object_center_params,
object_detection_params=object_detection_params,
deepmac_params=deepmac_params)
keypoint_params_dict = None
if center_net_config.keypoint_estimation_task:
label_map_proto = label_map_util.load_labelmap(
center_net_config.keypoint_label_map_path)
keypoint_map_dict = {
item.name: item for item in label_map_proto.item if item.keypoints
}
keypoint_params_dict = {}
keypoint_class_id_set = set()
all_keypoint_indices = []
for task in center_net_config.keypoint_estimation_task:
kp_params = keypoint_proto_to_params(task, keypoint_map_dict)
keypoint_params_dict[task.task_name] = kp_params
all_keypoint_indices.extend(kp_params.keypoint_indices)
if kp_params.class_id in keypoint_class_id_set:
        raise ValueError(('Multiple keypoint tasks mapping to the same class '
                          'id are not allowed: %d' % kp_params.class_id))
else:
keypoint_class_id_set.add(kp_params.class_id)
if len(all_keypoint_indices) > len(set(all_keypoint_indices)):
raise ValueError('Some keypoint indices are used more than once.')
mask_params = None
if center_net_config.HasField('mask_estimation_task'):
mask_params = mask_proto_to_params(center_net_config.mask_estimation_task)
densepose_params = None
if center_net_config.HasField('densepose_estimation_task'):
densepose_params = densepose_proto_to_params(
center_net_config.densepose_estimation_task)
track_params = None
if center_net_config.HasField('track_estimation_task'):
track_params = tracking_proto_to_params(
center_net_config.track_estimation_task)
temporal_offset_params = None
if center_net_config.HasField('temporal_offset_task'):
temporal_offset_params = temporal_offset_proto_to_params(
center_net_config.temporal_offset_task)
non_max_suppression_fn = None
if center_net_config.HasField('post_processing'):
non_max_suppression_fn, _ = post_processing_builder.build(
center_net_config.post_processing)
return center_net_meta_arch.CenterNetMetaArch(
is_training=is_training,
add_summaries=add_summaries,
num_classes=center_net_config.num_classes,
feature_extractor=feature_extractor,
image_resizer_fn=image_resizer_fn,
object_center_params=object_center_params,
object_detection_params=object_detection_params,
keypoint_params_dict=keypoint_params_dict,
mask_params=mask_params,
densepose_params=densepose_params,
track_params=track_params,
temporal_offset_params=temporal_offset_params,
use_depthwise=center_net_config.use_depthwise,
compute_heatmap_sparse=center_net_config.compute_heatmap_sparse,
non_max_suppression_fn=non_max_suppression_fn,
output_prediction_dict=center_net_config.output_prediction_dict)
def _build_center_net_feature_extractor(feature_extractor_config, is_training):
"""Build a CenterNet feature extractor from the given config."""
if feature_extractor_config.type not in CENTER_NET_EXTRACTOR_FUNCTION_MAP:
raise ValueError('\'{}\' is not a known CenterNet feature extractor type'
.format(feature_extractor_config.type))
# For backwards compatibility:
use_separable_conv = (
feature_extractor_config.use_separable_conv or
feature_extractor_config.type == 'mobilenet_v2_fpn_sep_conv')
kwargs = {
'channel_means':
list(feature_extractor_config.channel_means),
'channel_stds':
list(feature_extractor_config.channel_stds),
'bgr_ordering':
feature_extractor_config.bgr_ordering,
}
if feature_extractor_config.HasField('depth_multiplier'):
kwargs.update({
'depth_multiplier': feature_extractor_config.depth_multiplier,
})
if feature_extractor_config.HasField('use_separable_conv'):
kwargs.update({
'use_separable_conv': use_separable_conv,
})
if feature_extractor_config.HasField('upsampling_interpolation'):
kwargs.update({
'upsampling_interpolation':
feature_extractor_config.upsampling_interpolation,
})
if feature_extractor_config.HasField('use_depthwise'):
kwargs.update({
'use_depthwise': feature_extractor_config.use_depthwise,
})
return CENTER_NET_EXTRACTOR_FUNCTION_MAP[feature_extractor_config.type](
**kwargs)
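# Illustrative usage sketch, not part of the upstream module: under a TF2
# build, the extractor `type` must be a key of
# CENTER_NET_EXTRACTOR_FUNCTION_MAP; 'hourglass_104' and the channel
# statistics below are assumed placeholder values.
def _example_build_center_net_feature_extractor(is_training=True):
  """Builds a CenterNet feature extractor from a minimal config (sketch)."""
  fe_config = model_pb2.DetectionModel().center_net.feature_extractor
  fe_config.type = 'hourglass_104'  # assumed registered key
  fe_config.channel_means.extend([104.0, 114.0, 120.0])  # placeholder values
  fe_config.channel_stds.extend([73.6, 69.9, 70.9])  # placeholder values
  fe_config.bgr_ordering = True
  return _build_center_net_feature_extractor(fe_config, is_training)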
META_ARCH_BUILDER_MAP = {
'ssd': _build_ssd_model,
'faster_rcnn': _build_faster_rcnn_model,
'experimental_model': _build_experimental_model,
'center_net': _build_center_net_model
}
def build(model_config, is_training, add_summaries=True):
"""Builds a DetectionModel based on the model config.
Args:
model_config: A model.proto object containing the config for the desired
DetectionModel.
is_training: True if this model is being built for training purposes.
add_summaries: Whether to add tensorflow summaries in the model graph.
Returns:
DetectionModel based on the config.
Raises:
ValueError: On invalid meta architecture or model.
"""
if not isinstance(model_config, model_pb2.DetectionModel):
raise ValueError('model_config not of type model_pb2.DetectionModel.')
meta_architecture = model_config.WhichOneof('model')
if meta_architecture not in META_ARCH_BUILDER_MAP:
raise ValueError('Unknown meta architecture: {}'.format(meta_architecture))
else:
build_func = META_ARCH_BUILDER_MAP[meta_architecture]
return build_func(getattr(model_config, meta_architecture), is_training,
add_summaries)
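# Illustrative usage sketch, not part of the upstream module: the pipeline
# config path is a placeholder, and `config_util` is assumed to refer to
# object_detection.utils.config_util.
def _example_build_from_pipeline_config(pipeline_config_path,
                                        is_training=True):
  """Builds a DetectionModel from a pipeline .config file (sketch)."""
  from object_detection.utils import config_util  # sketch-only import
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  return build(configs['model'], is_training=is_training)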
| 56,068 | 43.358386 | 121 | py |
models | models-master/research/object_detection/builders/box_predictor_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
import collections
import tensorflow.compat.v1 as tf
from object_detection.predictors import convolutional_box_predictor
from object_detection.predictors import convolutional_keras_box_predictor
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.predictors import mask_rcnn_keras_box_predictor
from object_detection.predictors import rfcn_box_predictor
from object_detection.predictors import rfcn_keras_box_predictor
from object_detection.predictors.heads import box_head
from object_detection.predictors.heads import class_head
from object_detection.predictors.heads import keras_box_head
from object_detection.predictors.heads import keras_class_head
from object_detection.predictors.heads import keras_mask_head
from object_detection.predictors.heads import mask_head
from object_detection.protos import box_predictor_pb2
def build_convolutional_box_predictor(is_training,
num_classes,
conv_hyperparams_fn,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
apply_sigmoid_to_scores=False,
add_background_class=True,
class_prediction_bias_init=0.0,
use_depthwise=False,
box_encodings_clip_range=None):
"""Builds the ConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
apply_sigmoid_to_scores: If True, apply the sigmoid on the output
class_predictions.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: Constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
Returns:
A ConvolutionalBoxPredictor class.
"""
box_prediction_head = box_head.ConvolutionalBoxHead(
is_training=is_training,
box_code_size=box_code_size,
kernel_size=kernel_size,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
class_prediction_head = class_head.ConvolutionalClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
apply_sigmoid_to_scores=apply_sigmoid_to_scores,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise)
other_heads = {}
return convolutional_box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams_fn=conv_hyperparams_fn,
num_layers_before_predictor=num_layers_before_predictor,
min_depth=min_depth,
max_depth=max_depth)
def build_convolutional_keras_box_predictor(is_training,
num_classes,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
num_predictions_per_location_list,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
add_background_class=True,
class_prediction_bias_init=0.0,
use_depthwise=False,
box_encodings_clip_range=None,
name='BoxPredictor'):
"""Builds the Keras ConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
      values inplace. When this is false, the train op must add a control
      dependency on the tf.GraphKeys.UPDATE_OPS collection in order to update
      batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
name: A string name scope to assign to the box predictor. If `None`, Keras
will auto-generate one from the class name.
Returns:
A Keras ConvolutionalBoxPredictor class.
"""
box_prediction_heads = []
class_prediction_heads = []
other_heads = {}
for stack_index, num_predictions_per_location in enumerate(
num_predictions_per_location_list):
box_prediction_heads.append(
keras_box_head.ConvolutionalBoxHead(
is_training=is_training,
box_code_size=box_code_size,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
num_predictions_per_location=num_predictions_per_location,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range,
name='ConvolutionalBoxHead_%d' % stack_index))
class_prediction_heads.append(
keras_class_head.ConvolutionalClassHead(
is_training=is_training,
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
num_predictions_per_location=num_predictions_per_location,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise,
name='ConvolutionalClassHead_%d' % stack_index))
return convolutional_keras_box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_heads=box_prediction_heads,
class_prediction_heads=class_prediction_heads,
other_heads=other_heads,
conv_hyperparams=conv_hyperparams,
num_layers_before_predictor=num_layers_before_predictor,
min_depth=min_depth,
max_depth=max_depth,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
name=name)
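# Illustrative usage sketch, not part of the upstream module: the hyperparams
# text proto and the argument values below are placeholders, and the
# hyperparams builder/proto are imported locally because this module does not
# import them itself.
def _example_build_convolutional_keras_box_predictor(is_training=True):
  """Builds a Keras ConvolutionalBoxPredictor from a minimal setup."""
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  from object_detection.protos import hyperparams_pb2
  conv_hyperparams_text = """
    activation: RELU_6
    regularizer { l2_regularizer { weight: 0.0004 } }
    initializer { truncated_normal_initializer { stddev: 0.03 } }
  """
  conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
      text_format.Merge(conv_hyperparams_text, hyperparams_pb2.Hyperparams()))
  return build_convolutional_keras_box_predictor(
      is_training=is_training,
      num_classes=90,
      conv_hyperparams=conv_hyperparams,
      freeze_batchnorm=False,
      inplace_batchnorm_update=False,
      num_predictions_per_location_list=[6],
      min_depth=0,
      max_depth=0,
      num_layers_before_predictor=0,
      use_dropout=False,
      dropout_keep_prob=0.8,
      kernel_size=3,
      box_code_size=4)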
def build_weight_shared_convolutional_box_predictor(
is_training,
num_classes,
conv_hyperparams_fn,
depth,
num_layers_before_predictor,
box_code_size,
kernel_size=3,
add_background_class=True,
class_prediction_bias_init=0.0,
use_dropout=False,
dropout_keep_prob=0.8,
share_prediction_tower=False,
apply_batch_norm=True,
use_depthwise=False,
score_converter_fn=tf.identity,
box_encodings_clip_range=None,
keyword_args=None):
"""Builds and returns a WeightSharedConvolutionalBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
box_code_size: Size of encoding for each box.
kernel_size: Size of final convolution kernel.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_dropout: Whether to apply dropout to class prediction head.
    dropout_keep_prob: Probability of keeping activations.
share_prediction_tower: Whether to share the multi-layer tower between box
prediction and class prediction heads.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
use_depthwise: Whether to use depthwise separable conv2d instead of conv2d.
score_converter_fn: Callable score converter to perform elementwise op on
class scores.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
keyword_args: A dictionary with additional args.
Returns:
A WeightSharedConvolutionalBoxPredictor class.
"""
box_prediction_head = box_head.WeightSharedConvolutionalBoxHead(
box_code_size=box_code_size,
kernel_size=kernel_size,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
class_prediction_head = (
class_head.WeightSharedConvolutionalClassHead(
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
kernel_size=kernel_size,
class_prediction_bias_init=class_prediction_bias_init,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
use_depthwise=use_depthwise,
score_converter_fn=score_converter_fn))
other_heads = {}
return convolutional_box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams_fn=conv_hyperparams_fn,
depth=depth,
num_layers_before_predictor=num_layers_before_predictor,
kernel_size=kernel_size,
apply_batch_norm=apply_batch_norm,
share_prediction_tower=share_prediction_tower,
use_depthwise=use_depthwise)
def build_weight_shared_convolutional_keras_box_predictor(
is_training,
num_classes,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
num_predictions_per_location_list,
depth,
num_layers_before_predictor,
box_code_size,
kernel_size=3,
add_background_class=True,
class_prediction_bias_init=0.0,
use_dropout=False,
dropout_keep_prob=0.8,
share_prediction_tower=False,
apply_batch_norm=True,
use_depthwise=False,
apply_conv_hyperparams_to_heads=False,
apply_conv_hyperparams_pointwise=False,
score_converter_fn=tf.identity,
box_encodings_clip_range=None,
name='WeightSharedConvolutionalBoxPredictor',
keyword_args=None):
"""Builds the Keras WeightSharedConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
      values inplace. When this is false, the train op must add a control
      dependency on the tf.GraphKeys.UPDATE_OPS collection in order to update
      batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
box_code_size: Size of encoding for each box.
kernel_size: Size of final convolution kernel.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_dropout: Whether to apply dropout to class prediction head.
    dropout_keep_prob: Probability of keeping activations.
share_prediction_tower: Whether to share the multi-layer tower between box
prediction and class prediction heads.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
use_depthwise: Whether to use depthwise separable conv2d instead of conv2d.
apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to
      depthwise separable convolution layers in the box and class heads. By
default, the conv_hyperparams are only applied to layers in the predictor
tower when using depthwise separable convolutions.
apply_conv_hyperparams_pointwise: Whether to apply the conv_hyperparams to
the pointwise_initializer and pointwise_regularizer when using depthwise
separable convolutions. By default, conv_hyperparams are only applied to
the depthwise initializer and regularizer when use_depthwise is true.
score_converter_fn: Callable score converter to perform elementwise op on
class scores.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
name: A string name scope to assign to the box predictor. If `None`, Keras
will auto-generate one from the class name.
keyword_args: A dictionary with additional args.
Returns:
A Keras WeightSharedConvolutionalBoxPredictor class.
"""
if len(set(num_predictions_per_location_list)) > 1:
    raise ValueError('num predictions per location must be the same for all '
'feature maps, found: {}'.format(
num_predictions_per_location_list))
num_predictions_per_location = num_predictions_per_location_list[0]
box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
box_code_size=box_code_size,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
use_depthwise=use_depthwise,
apply_conv_hyperparams_to_heads=apply_conv_hyperparams_to_heads,
box_encodings_clip_range=box_encodings_clip_range,
name='WeightSharedConvolutionalBoxHead')
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise,
apply_conv_hyperparams_to_heads=apply_conv_hyperparams_to_heads,
score_converter_fn=score_converter_fn,
name='WeightSharedConvolutionalClassHead')
other_heads = {}
return (
convolutional_keras_box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams=conv_hyperparams,
depth=depth,
num_layers_before_predictor=num_layers_before_predictor,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
kernel_size=kernel_size,
apply_batch_norm=apply_batch_norm,
share_prediction_tower=share_prediction_tower,
use_depthwise=use_depthwise,
apply_conv_hyperparams_pointwise=apply_conv_hyperparams_pointwise,
name=name))
def build_mask_rcnn_keras_box_predictor(is_training,
num_classes,
fc_hyperparams,
freeze_batchnorm,
use_dropout,
dropout_keep_prob,
box_code_size,
add_background_class=True,
share_box_across_classes=False,
predict_instance_masks=False,
conv_hyperparams=None,
mask_height=14,
mask_width=14,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=False,
convolve_then_upsample_masks=False):
"""Builds and returns a MaskRCNNKerasBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for fully connected dense ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
share_box_across_classes: Whether to share boxes across classes rather
than use a different box for each class.
predict_instance_masks: If True, will add a third stage mask prediction
to the returned class.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
mask_height: Desired output mask height. The default value is 14.
mask_width: Desired output mask width. The default value is 14.
mask_prediction_num_conv_layers: Number of convolution layers applied to
the image_features in mask prediction branch.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
applied to the image_features in the mask prediction branch. If set
to 0, the depth of the convolution layers will be automatically chosen
based on the number of object classes and the number of channels in the
image features.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
convolve_then_upsample_masks: Whether to apply convolutions on mask
features before upsampling using nearest neighbor resizing. Otherwise,
mask features are resized to [`mask_height`, `mask_width`] using
bilinear resizing before applying convolutions.
Returns:
A MaskRCNNKerasBoxPredictor class.
"""
box_prediction_head = keras_box_head.MaskRCNNBoxHead(
is_training=is_training,
num_classes=num_classes,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
box_code_size=box_code_size,
share_box_across_classes=share_box_across_classes)
class_prediction_head = keras_class_head.MaskRCNNClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob)
third_stage_heads = {}
if predict_instance_masks:
third_stage_heads[
mask_rcnn_box_predictor.
MASK_PREDICTIONS] = keras_mask_head.MaskRCNNMaskHead(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
mask_height=mask_height,
mask_width=mask_width,
mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
mask_prediction_conv_depth=mask_prediction_conv_depth,
masks_are_class_agnostic=masks_are_class_agnostic,
convolve_then_upsample=convolve_then_upsample_masks)
return mask_rcnn_keras_box_predictor.MaskRCNNKerasBoxPredictor(
is_training=is_training,
num_classes=num_classes,
freeze_batchnorm=freeze_batchnorm,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
third_stage_heads=third_stage_heads)
def build_mask_rcnn_box_predictor(is_training,
num_classes,
fc_hyperparams_fn,
use_dropout,
dropout_keep_prob,
box_code_size,
add_background_class=True,
share_box_across_classes=False,
predict_instance_masks=False,
conv_hyperparams_fn=None,
mask_height=14,
mask_width=14,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=False,
convolve_then_upsample_masks=False):
"""Builds and returns a MaskRCNNBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
fc_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for fully connected ops.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
share_box_across_classes: Whether to share boxes across classes rather
than use a different box for each class.
predict_instance_masks: If True, will add a third stage mask prediction
to the returned class.
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
mask_height: Desired output mask height. The default value is 14.
mask_width: Desired output mask width. The default value is 14.
mask_prediction_num_conv_layers: Number of convolution layers applied to
the image_features in mask prediction branch.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
applied to the image_features in the mask prediction branch. If set
to 0, the depth of the convolution layers will be automatically chosen
based on the number of object classes and the number of channels in the
image features.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
convolve_then_upsample_masks: Whether to apply convolutions on mask
features before upsampling using nearest neighbor resizing. Otherwise,
mask features are resized to [`mask_height`, `mask_width`] using
bilinear resizing before applying convolutions.
Returns:
A MaskRCNNBoxPredictor class.
"""
box_prediction_head = box_head.MaskRCNNBoxHead(
is_training=is_training,
num_classes=num_classes,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
box_code_size=box_code_size,
share_box_across_classes=share_box_across_classes)
class_prediction_head = class_head.MaskRCNNClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob)
third_stage_heads = {}
if predict_instance_masks:
third_stage_heads[
mask_rcnn_box_predictor.
MASK_PREDICTIONS] = mask_head.MaskRCNNMaskHead(
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
mask_height=mask_height,
mask_width=mask_width,
mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
mask_prediction_conv_depth=mask_prediction_conv_depth,
masks_are_class_agnostic=masks_are_class_agnostic,
convolve_then_upsample=convolve_then_upsample_masks)
return mask_rcnn_box_predictor.MaskRCNNBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
third_stage_heads=third_stage_heads)
def build_score_converter(score_converter_config, is_training):
"""Builds score converter based on the config.
Builds one of [tf.identity, tf.sigmoid] score converters based on the config
and whether the BoxPredictor is for training or inference.
Args:
score_converter_config:
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter.
is_training: Indicates whether the BoxPredictor is in training mode.
Returns:
Callable score converter op.
Raises:
ValueError: On unknown score converter.
"""
if score_converter_config == (
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY):
return tf.identity
if score_converter_config == (
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID):
return tf.identity if is_training else tf.sigmoid
raise ValueError('Unknown score converter.')
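# Illustrative usage sketch, not part of the upstream module: with the SIGMOID
# setting, training keeps raw logits while inference applies a sigmoid.
def _example_score_converters():
  """Returns (train, eval) score converters for the SIGMOID setting."""
  sigmoid_config = (
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID)
  train_fn = build_score_converter(sigmoid_config, is_training=True)
  eval_fn = build_score_converter(sigmoid_config, is_training=False)
  return train_fn, eval_fn  # (tf.identity, tf.sigmoid)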
BoxEncodingsClipRange = collections.namedtuple('BoxEncodingsClipRange',
['min', 'max'])
def build(argscope_fn, box_predictor_config, is_training, num_classes,
add_background_class=True):
"""Builds box predictor based on the configuration.
Builds box predictor based on the configuration. See box_predictor.proto for
configurable options. Also, see box_predictor.py for more details.
Args:
argscope_fn: A function that takes the following inputs:
* hyperparams_pb2.Hyperparams proto
* a boolean indicating if the model is in training mode.
and returns a tf slim argscope for Conv and FC hyperparameters.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
    is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
add_background_class: Whether to add an implicit background class.
Returns:
box_predictor: box_predictor.BoxPredictor object.
Raises:
ValueError: On unknown box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
config_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
return build_convolutional_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams_fn=conv_hyperparams_fn,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
min_depth=config_box_predictor.min_depth,
max_depth=config_box_predictor.max_depth,
apply_sigmoid_to_scores=config_box_predictor.apply_sigmoid_to_scores,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_depthwise=config_box_predictor.use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
config_box_predictor = (
box_predictor_config.weight_shared_convolutional_box_predictor)
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
'batch_norm')
# During training phase, logits are used to compute the loss. Only apply
# sigmoid at inference to make the inference graph TPU friendly.
score_converter_fn = build_score_converter(
config_box_predictor.score_converter, is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
keyword_args = None
return build_weight_shared_convolutional_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams_fn=conv_hyperparams_fn,
depth=config_box_predictor.depth,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
share_prediction_tower=config_box_predictor.share_prediction_tower,
apply_batch_norm=apply_batch_norm,
use_depthwise=config_box_predictor.use_depthwise,
score_converter_fn=score_converter_fn,
box_encodings_clip_range=box_encodings_clip_range,
keyword_args=keyword_args)
if box_predictor_oneof == 'mask_rcnn_box_predictor':
config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams_fn = argscope_fn(config_box_predictor.fc_hyperparams,
is_training)
conv_hyperparams_fn = None
if config_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams_fn = argscope_fn(
config_box_predictor.conv_hyperparams, is_training)
return build_mask_rcnn_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
share_box_across_classes=(
config_box_predictor.share_box_across_classes),
predict_instance_masks=config_box_predictor.predict_instance_masks,
conv_hyperparams_fn=conv_hyperparams_fn,
mask_height=config_box_predictor.mask_height,
mask_width=config_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
config_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
config_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
config_box_predictor.masks_are_class_agnostic),
convolve_then_upsample_masks=(
config_box_predictor.convolve_then_upsample_masks))
if box_predictor_oneof == 'rfcn_box_predictor':
config_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
box_predictor_object = rfcn_box_predictor.RfcnBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
crop_size=[config_box_predictor.crop_height,
config_box_predictor.crop_width],
num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
config_box_predictor.num_spatial_bins_width],
depth=config_box_predictor.depth,
box_code_size=config_box_predictor.box_code_size)
return box_predictor_object
raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
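# Illustrative usage sketch (not part of the original module). It assumes the
# typical TF1 caller wiring, where `hyperparams_builder.build` is supplied as
# `argscope_fn`; the hyperparameter values in the text proto are examples only.
def _example_build_convolutional_box_predictor():
  """Sketch: build a ConvolutionalBoxPredictor from a text-format config."""
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  config = text_format.Parse(
      """
      convolutional_box_predictor {
        conv_hyperparams {
          regularizer { l2_regularizer { weight: 0.0004 } }
          initializer { truncated_normal_initializer { stddev: 0.03 } }
        }
      }
      """, box_predictor_pb2.BoxPredictor())
  return build(hyperparams_builder.build, config, is_training=True,
               num_classes=90)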
def build_keras(hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update,
num_predictions_per_location_list, box_predictor_config,
is_training, num_classes, add_background_class=True):
"""Builds a Keras-based box predictor based on the configuration.
Builds Keras-based box predictor based on the configuration.
See box_predictor.proto for configurable options. Also, see box_predictor.py
for more details.
Args:
hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams
proto and returns a `hyperparams_builder.KerasLayerHyperparams`
for Conv or FC hyperparameters.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
    inplace_batchnorm_update: Whether to update batch norm moving average
      values in place. When this is false, the train op must add a control
      dependency on the tf.GraphKeys.UPDATE_OPS collection in order to update
      batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
    is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
add_background_class: Whether to add an implicit background class.
Returns:
box_predictor: box_predictor.KerasBoxPredictor object.
Raises:
ValueError: On unknown box predictor, or one with no Keras box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
config_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams = hyperparams_fn(
config_box_predictor.conv_hyperparams)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
return build_convolutional_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
num_predictions_per_location_list=num_predictions_per_location_list,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
min_depth=config_box_predictor.min_depth,
max_depth=config_box_predictor.max_depth,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_depthwise=config_box_predictor.use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
config_box_predictor = (
box_predictor_config.weight_shared_convolutional_box_predictor)
conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
'batch_norm')
# During training phase, logits are used to compute the loss. Only apply
# sigmoid at inference to make the inference graph TPU friendly. This is
# required because during TPU inference, model.postprocess is not called.
score_converter_fn = build_score_converter(
config_box_predictor.score_converter, is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
keyword_args = None
return build_weight_shared_convolutional_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
num_predictions_per_location_list=num_predictions_per_location_list,
depth=config_box_predictor.depth,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
add_background_class=add_background_class,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
share_prediction_tower=config_box_predictor.share_prediction_tower,
apply_batch_norm=apply_batch_norm,
use_depthwise=config_box_predictor.use_depthwise,
apply_conv_hyperparams_to_heads=(
config_box_predictor.apply_conv_hyperparams_to_heads),
apply_conv_hyperparams_pointwise=(
config_box_predictor.apply_conv_hyperparams_pointwise),
score_converter_fn=score_converter_fn,
box_encodings_clip_range=box_encodings_clip_range,
keyword_args=keyword_args)
if box_predictor_oneof == 'mask_rcnn_box_predictor':
config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams = hyperparams_fn(config_box_predictor.fc_hyperparams)
conv_hyperparams = None
if config_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams = hyperparams_fn(
config_box_predictor.conv_hyperparams)
return build_mask_rcnn_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
share_box_across_classes=(
config_box_predictor.share_box_across_classes),
predict_instance_masks=config_box_predictor.predict_instance_masks,
conv_hyperparams=conv_hyperparams,
mask_height=config_box_predictor.mask_height,
mask_width=config_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
config_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
config_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
config_box_predictor.masks_are_class_agnostic),
convolve_then_upsample_masks=(
config_box_predictor.convolve_then_upsample_masks))
if box_predictor_oneof == 'rfcn_box_predictor':
config_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
box_predictor_object = rfcn_keras_box_predictor.RfcnKerasBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
crop_size=[config_box_predictor.crop_height,
config_box_predictor.crop_width],
num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
config_box_predictor.num_spatial_bins_width],
depth=config_box_predictor.depth,
box_code_size=config_box_predictor.box_code_size)
return box_predictor_object
raise ValueError(
'Unknown box predictor for Keras: {}'.format(box_predictor_oneof))
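# Illustrative usage sketch (not part of the original module). It assumes the
# usual Keras wiring, where `hyperparams_builder.KerasLayerHyperparams` serves
# as `hyperparams_fn`; the config values and the two-feature-map
# `num_predictions_per_location_list` are examples only.
def _example_build_keras_convolutional_box_predictor():
  """Sketch: build a Keras convolutional box predictor from a config."""
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  config = text_format.Parse(
      """
      convolutional_box_predictor {
        conv_hyperparams {
          regularizer { l2_regularizer { weight: 0.0004 } }
          initializer { truncated_normal_initializer { stddev: 0.03 } }
        }
      }
      """, box_predictor_pb2.BoxPredictor())
  return build_keras(
      hyperparams_builder.KerasLayerHyperparams,
      freeze_batchnorm=False,
      inplace_batchnorm_update=False,
      num_predictions_per_location_list=[3, 3],
      box_predictor_config=config,
      is_training=True,
      num_classes=90)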
| 48,110 | 47.450151 | 80 | py |
models | models-master/research/object_detection/builders/optimizer_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build DetectionModel training optimizers."""
import tensorflow.compat.v1 as tf
from object_detection.utils import learning_schedules
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from official.modeling.optimization import ema_optimizer
# pylint: enable=g-import-not-at-top
try:
from tensorflow.contrib import opt as tf_opt # pylint: disable=g-import-not-at-top
except: # pylint: disable=bare-except
pass
def build_optimizers_tf_v1(optimizer_config, global_step=None):
"""Create a TF v1 compatible optimizer based on config.
Args:
    optimizer_config: An Optimizer proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.WhichOneof('optimizer')
optimizer = None
summary_vars = []
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=config.decay,
momentum=config.momentum_optimizer_value,
epsilon=config.epsilon)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=config.momentum_optimizer_value)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=config.epsilon)
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_type)
if optimizer_config.use_moving_average:
optimizer = tf_opt.MovingAverageOptimizer(
optimizer, average_decay=optimizer_config.moving_average_decay)
return optimizer, summary_vars
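# Illustrative usage sketch (not part of the original module), intended for
# graph (TF v1) mode. The `optimizer_pb2` import and the config values below
# are example assumptions rather than values taken from this module.
def _example_build_tf_v1_momentum_optimizer():
  """Sketch: create a TF v1 momentum optimizer from a text-format config."""
  from google.protobuf import text_format
  from object_detection.protos import optimizer_pb2
  config = text_format.Parse(
      """
      momentum_optimizer {
        learning_rate { constant_learning_rate { learning_rate: 0.01 } }
        momentum_optimizer_value: 0.9
      }
      use_moving_average: false
      """, optimizer_pb2.Optimizer())
  optimizer, summary_vars = build_optimizers_tf_v1(config)
  return optimizer, summary_vars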
def build_optimizers_tf_v2(optimizer_config, global_step=None):
"""Create a TF v2 compatible optimizer based on config.
Args:
    optimizer_config: An Optimizer proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.WhichOneof('optimizer')
optimizer = None
summary_vars = []
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.RMSprop(
learning_rate,
decay=config.decay,
momentum=config.momentum_optimizer_value,
epsilon=config.epsilon)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.SGD(
learning_rate,
momentum=config.momentum_optimizer_value)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.Adam(learning_rate, epsilon=config.epsilon)
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_type)
if optimizer_config.use_moving_average:
optimizer = ema_optimizer.ExponentialMovingAverage(
optimizer=optimizer,
average_decay=optimizer_config.moving_average_decay)
return optimizer, summary_vars
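# Illustrative usage sketch (not part of the original module): the same proto
# format drives the tf.keras optimizer path when eager execution is enabled.
# The `optimizer_pb2` import, the step variable, and the config values are
# example assumptions.
def _example_build_tf_v2_adam_optimizer():
  """Sketch: create a tf.keras Adam optimizer from a text-format config."""
  from google.protobuf import text_format
  from object_detection.protos import optimizer_pb2
  config = text_format.Parse(
      """
      adam_optimizer {
        learning_rate { constant_learning_rate { learning_rate: 0.001 } }
        epsilon: 1e-7
      }
      use_moving_average: false
      """, optimizer_pb2.Optimizer())
  step = tf.Variable(0, trainable=False, dtype=tf.int64, name='global_step')
  optimizer, _ = build_optimizers_tf_v2(config, global_step=step)
  return optimizer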
def build(config, global_step=None):
  """Creates an optimizer for the current TF execution mode.
  Dispatches to the TF v2 (Keras) optimizer builder when eager execution is
  enabled and to the TF v1 builder otherwise. Arguments and return values
  match those of build_optimizers_tf_v1 and build_optimizers_tf_v2.
  """
if tf.executing_eagerly():
return build_optimizers_tf_v2(config, global_step)
else:
return build_optimizers_tf_v1(config, global_step)
def _create_learning_rate(learning_rate_config, global_step=None):
"""Create optimizer learning rate based on config.
Args:
learning_rate_config: A LearningRate proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
A learning rate.
Raises:
ValueError: when using an unsupported input data type.
"""
if global_step is None:
global_step = tf.train.get_or_create_global_step()
learning_rate = None
learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
if learning_rate_type == 'constant_learning_rate':
config = learning_rate_config.constant_learning_rate
learning_rate = tf.constant(config.learning_rate, dtype=tf.float32,
name='learning_rate')
if learning_rate_type == 'exponential_decay_learning_rate':
config = learning_rate_config.exponential_decay_learning_rate
learning_rate = learning_schedules.exponential_decay_with_burnin(
global_step,
config.initial_learning_rate,
config.decay_steps,
config.decay_factor,
burnin_learning_rate=config.burnin_learning_rate,
burnin_steps=config.burnin_steps,
min_learning_rate=config.min_learning_rate,
staircase=config.staircase)
if learning_rate_type == 'manual_step_learning_rate':
config = learning_rate_config.manual_step_learning_rate
if not config.schedule:
raise ValueError('Empty learning rate schedule.')
learning_rate_step_boundaries = [x.step for x in config.schedule]
learning_rate_sequence = [config.initial_learning_rate]
learning_rate_sequence += [x.learning_rate for x in config.schedule]
learning_rate = learning_schedules.manual_stepping(
global_step, learning_rate_step_boundaries,
learning_rate_sequence, config.warmup)
if learning_rate_type == 'cosine_decay_learning_rate':
config = learning_rate_config.cosine_decay_learning_rate
learning_rate = learning_schedules.cosine_decay_with_warmup(
global_step,
config.learning_rate_base,
config.total_steps,
config.warmup_learning_rate,
config.warmup_steps,
config.hold_base_rate_steps)
if learning_rate is None:
    raise ValueError('Learning rate %s not supported.' % learning_rate_type)
return learning_rate
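# Illustrative usage sketch (not part of the original module). The
# `optimizer_pb2` import, the explicit step variable, and the schedule values
# are example assumptions.
def _example_create_cosine_decay_learning_rate():
  """Sketch: build a cosine-decay-with-warmup learning rate from a config."""
  from google.protobuf import text_format
  from object_detection.protos import optimizer_pb2
  config = text_format.Parse(
      """
      cosine_decay_learning_rate {
        learning_rate_base: 0.04
        total_steps: 100000
        warmup_learning_rate: 0.013
        warmup_steps: 2000
        hold_base_rate_steps: 0
      }
      """, optimizer_pb2.LearningRate())
  step = tf.Variable(0, trainable=False, dtype=tf.int64, name='global_step')
  return _create_learning_rate(config, global_step=step)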
| 7,782 | 35.369159 | 85 | py |